text (string, lengths 4 to 1.02M) | meta (dict) |
---|---|
from EXOSIMS.StarCatalog.SIMBADCatalog import SIMBADCatalog
from EXOSIMS.util.get_dirs import get_cache_dir
import os
import inspect
class SIMBAD300Catalog(SIMBADCatalog):
"""SIMBAD300 Catalog class
This class populates the star catalog used in EXOSIMS from the SIMBAD300
catalog.
"""
def __init__(self, cachedir=None, **specs):
self.cachedir = get_cache_dir(cachedir)
classpath = os.path.split(inspect.getfile(self.__class__))[0]
filename = "SIMBAD300"
pklpath = os.path.join(self.cachedir, filename + ".pkl")
matpath = os.path.join(classpath, filename + ".mat")
# check if given filename exists as .pkl file already
if os.path.exists(pklpath):
self.populatepkl(pklpath, **specs)
self.vprint("Loaded %s.pkl star catalog" % filename)
# check if given filename exists as a .mat file but not .pkl file
elif os.path.exists(matpath):
self.SIMBAD_mat2pkl(matpath, pklpath)
self.populatepkl(pklpath, **specs)
self.vprint("Loaded %s.mat star catalog" % filename)
# otherwise print error
else:
self.vprint("Could not load SIMBAD300 star catalog")
| {
"content_hash": "e465a78125bdd97c39409fc63bd39699",
"timestamp": "",
"source": "github",
"line_count": 33,
"max_line_length": 76,
"avg_line_length": 38.121212121212125,
"alnum_prop": 0.6335453100158982,
"repo_name": "dsavransky/EXOSIMS",
"id": "b2c5e2890380be1897917f4f181a5cf40f74f7f3",
"size": "1283",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "EXOSIMS/StarCatalog/SIMBAD300Catalog.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8087"
},
{
"name": "Cython",
"bytes": "2459"
},
{
"name": "Python",
"bytes": "2936469"
}
],
"symlink_target": ""
} |
NAME = "name"
TABLE = "table"
VALUE = "value"
FIELDS = "fields"
REPORT = "report"
KARYO = "karyotype"
KARYOTYPE_STRING = "KaryotypeString"
CONFIDENCE = "confidence"
VERSION = "algorithmVersion"
STARTSTOPS = "startStops"
KEY = "recordKey"
START = "startPosition"
STOP = "stopPosition"
ERR_STR = "errorString"
ERR_TYPE = "errorType"
INSUFFICIENT = "Insufficient"
MISCELLANEOUS = "Miscellaneous"
INTERMEDIATE = "Intermediate"
UNFAVORABLE = "Unfavorable"
FAVORABLE = "Favorable"
ABNORMALITIES = "Abnormalities"
WARNING = "Warning"
CYTOGENETICS = "Cytogenetics"
SWOG = "AML_SWOG_RiskCategory"
ELN = "ELN_RiskCategory"
DRI = "DRI_RiskCategory"
MUTS = "DistinctMutations"
MONOS = "DistinctMonosomies"
TRIS = "DistinctTrisomies"
UNKNOWN = "Unknown"
CELL_COUNT = "CellCount"
OFFSET = "Offset"
REF_OFF = "ReferenceOffset"
NORMAL = "Normal"
DATE = "ReceivedDate"
HYPERDU = "Hyperduploidy"
HYPODU = "Hypoduploidy"
MONO_TYPE = "MonosomalKaryotype"
CMPX_TYPE = "ComplexKaryotype"
SEX_CHRM_ABN = "SexChromosomeAbnormality" | {
"content_hash": "bd346b83eed6f709fb35b8ad076594c3",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 41,
"avg_line_length": 25.175,
"alnum_prop": 0.7497517378351539,
"repo_name": "esilgard/argos_nlp",
"id": "70cbc950f8c46771dea9243e573d3d6f380dfde5",
"size": "1068",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "fhcrc_cytogenetics/heme/global_strings.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "519"
},
{
"name": "Python",
"bytes": "158922"
}
],
"symlink_target": ""
} |
import os,random
os.environ["KERAS_BACKEND"] = "tensorflow"
import keras.backend as K
from gan_model import generator_model,discriminator_model
K.set_image_dim_ordering('th')
#os.environ["THEANO_FLAGS"] = "device=gpu%d,lib.cnmem=0"%(random.randint(0,3))
import numpy as np
import theano as th
import theano.tensor as T
from keras.utils import np_utils
import keras.models as models
from keras.layers import Input,merge
from keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten,MaxoutDense
from keras.layers.advanced_activations import LeakyReLU
from keras.activations import *
from keras.layers.wrappers import TimeDistributed
from keras.layers.noise import GaussianNoise
from keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D, UpSampling2D
from keras.layers.recurrent import LSTM
from keras.regularizers import *
from keras.layers.normalization import *
from keras.optimizers import *
from keras.datasets import mnist
import matplotlib.pyplot as plt
import seaborn as sns
import cPickle, random, sys, keras
from keras.models import Model
#from IPython import display
from keras.utils import np_utils
from tqdm import tqdm
img_rows, img_cols = 28, 28
# the data, shuffled and split between train and test sets
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.reshape(X_train.shape[0], 1, img_rows, img_cols)
X_test = X_test.reshape(X_test.shape[0], 1, img_rows, img_cols)
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
print np.min(X_train), np.max(X_train)
print('X_train shape:', X_train.shape)
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')
def make_trainable(net, val):
net.trainable = val
for l in net.layers:
l.trainable = val
shp = X_train.shape[1:]
dropout_rate = 0.25
opt = Adam(lr=1e-4)
dopt = Adam(lr=1e-3)
# opt = Adam()
# dopt = Adam()
# Build Generative model ...
nch = 200
g_input = Input(shape=[100])
H = Dense(nch*14*14, init='glorot_normal')(g_input)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = Reshape( [nch, 14, 14] )(H)
H = UpSampling2D(size=(2, 2))(H)
H = Convolution2D(nch/2, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = Convolution2D(nch/4, 3, 3, border_mode='same', init='glorot_uniform')(H)
H = BatchNormalization(mode=2)(H)
H = Activation('relu')(H)
H = Convolution2D(1, 1, 1, border_mode='same', init='glorot_uniform')(H)
g_V = Activation('sigmoid')(H)
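# NB: g_V is immediately rebuilt below from the imported generator_model, so
# the hand-built generator layers above are effectively superseded by it.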
g_V = generator_model(100)(g_input)
generator = Model(g_input,g_V)
generator.compile(loss='binary_crossentropy', optimizer=opt)
generator.summary()
# Build Discriminative model ...
d_input = Input(shape=shp)
H = Convolution2D(256, 5, 5, subsample=(2, 2), border_mode = 'same', activation='relu')(d_input)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Convolution2D(512, 5, 5, subsample=(2, 2), border_mode = 'same', activation='relu')(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
H = Flatten()(H)
H = Dense(256)(H)
H = LeakyReLU(0.2)(H)
H = Dropout(dropout_rate)(H)
d_V = Dense(2,activation='softmax')(H)
discriminator = Model(d_input,d_V)
discriminator.compile(loss='categorical_crossentropy', optimizer=dopt)
discriminator.summary()
# Freeze weights in the discriminator for stacked training
def make_trainable(net, val):
net.trainable = val
for l in net.layers:
l.trainable = val
make_trainable(discriminator, False)
# Build stacked GAN model
gan_input = Input(shape=[100])
H = generator(gan_input)
gan_V = discriminator(H)
GAN = Model(gan_input, gan_V)
GAN.compile(loss='categorical_crossentropy', optimizer=opt)
GAN.summary()
def plot_loss(losses):
# display.clear_output(wait=True)
# display.display(plt.gcf())
plt.figure(figsize=(10,8))
plt.plot(losses["d"], label='discriminitive loss')
plt.plot(losses["g"], label='generative loss')
plt.legend()
plt.show()
def plot_gen(n_ex=16,dim=(4,4), figsize=(10,10) ):
noise = np.random.uniform(0,1,size=[n_ex,100])
generated_images = generator.predict(noise)
plt.figure(figsize=figsize)
for i in range(generated_images.shape[0]):
plt.subplot(dim[0],dim[1],i+1)
img = generated_images[i,0,:,:]
plt.imshow(img)
plt.axis('off')
plt.tight_layout()
plt.show()
ntrain = 10000
trainidx = random.sample(range(0,X_train.shape[0]), ntrain)
XT = X_train[trainidx,:,:,:]
# Pre-train the discriminator network ...
noise_gen = np.random.uniform(0,1,size=[XT.shape[0],100])
generated_images = generator.predict(noise_gen)
X = np.concatenate((XT, generated_images))
n = XT.shape[0]
y = np.zeros([2*n,2])
y[:n,1] = 1
y[n:,0] = 1
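# Column 1 flags the real MNIST samples (the first half of X), column 0 the
# generated ones, matching the concatenation order above.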
make_trainable(discriminator,True)
discriminator.fit(X,y, nb_epoch=1, batch_size=128)
y_hat = discriminator.predict(X)
# Measure accuracy of pre-trained discriminator network
y_hat_idx = np.argmax(y_hat,axis=1)
y_idx = np.argmax(y,axis=1)
diff = y_idx-y_hat_idx
n_tot = y.shape[0]
n_rig = (diff==0).sum()
acc = n_rig*100.0/n_tot
print "Accuracy: %0.02f pct (%d of %d) right"%(acc, n_rig, n_tot)
# set up loss storage vector
losses = {"d":[], "g":[]}
# Set up our main training loop
def train_for_n(nb_epoch=5000, plt_frq=25,BATCH_SIZE=32):
for e in tqdm(range(nb_epoch)):
# Make generative images
image_batch = X_train[np.random.randint(0,X_train.shape[0],size=BATCH_SIZE),:,:,:]
noise_gen = np.random.uniform(0,1,size=[BATCH_SIZE,100])
generated_images = generator.predict(noise_gen)
# Train discriminator on generated images
X = np.concatenate((image_batch, generated_images))
y = np.zeros([2*BATCH_SIZE,2])
y[0:BATCH_SIZE,1] = 1
y[BATCH_SIZE:,0] = 1
#make_trainable(discriminator,True)
d_loss = discriminator.train_on_batch(X,y)
losses["d"].append(d_loss)
# train Generator-Discriminator stack on input noise to non-generated output class
noise_tr = np.random.uniform(0,1,size=[BATCH_SIZE,100])
y2 = np.zeros([BATCH_SIZE,2])
y2[:,1] = 1
#make_trainable(discriminator,False)
g_loss = GAN.train_on_batch(noise_tr, y2 )
losses["g"].append(g_loss)
# Updates plots
if e%plt_frq==plt_frq-1:
plot_loss(losses)
plot_gen()
# Train for 6000 epochs at original learning rates
train_for_n(nb_epoch=6000, plt_frq=500,BATCH_SIZE=32)
# Train for 2000 epochs at reduced learning rates
K.set_value(opt.lr, 1e-5)   # backend-agnostic learning-rate update (works with the TensorFlow backend set above)
K.set_value(dopt.lr, 1e-4)
train_for_n(nb_epoch=2000, plt_frq=500,BATCH_SIZE=32)
# Train for 2000 epochs at reduced learning rates
K.set_value(opt.lr, 1e-6)
K.set_value(dopt.lr, 1e-5)
train_for_n(nb_epoch=2000, plt_frq=500,BATCH_SIZE=32)
# Plot the final loss curves
plot_loss(losses)
# Plot some generated images from our GAN
plot_gen(25,(5,5),(12,12))
def plot_real(n_ex=16,dim=(4,4), figsize=(10,10) ):
idx = np.random.randint(0,X_train.shape[0],n_ex)
generated_images = X_train[idx,:,:,:]
plt.figure(figsize=figsize)
for i in range(generated_images.shape[0]):
plt.subplot(dim[0],dim[1],i+1)
img = generated_images[i,0,:,:]
plt.imshow(img)
plt.axis('off')
plt.tight_layout()
plt.show()
# Plot real MNIST images for comparison
plot_real()
| {
"content_hash": "318dbd7b9b06497454808de2e7b9765b",
"timestamp": "",
"source": "github",
"line_count": 238,
"max_line_length": 96,
"avg_line_length": 31.205882352941178,
"alnum_prop": 0.6740271980611283,
"repo_name": "andrewv587/pycharm-project",
"id": "c3d521228ce604dee5323926f2783eee738b316e",
"size": "7599",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "GAN/mnist_gan_true.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "455546"
}
],
"symlink_target": ""
} |
import masterPlugin
import MatrixUtils
## Wrapper for MatrixUtils.initialize()
class init(masterPlugin.masterPlugin):
def __init__(this):
super().__init__()
this.command = "init"
this.aliases = None
        this.commandInfo = {'requiredArguments': [[0, int, 'width'],
[1, int, 'height']],
'optionalArguments': [[0, float, 'val']],
'argumentInfo': ['the width for the new matrix',
'the height for the new matrix',
'the value for all elements in the new matrix'],
'help': """Creates a new matrix with specified dimensions, with all
elements initialized to zero, or to val if it is given"""}
def execute(this, arguments, WORKINGMATRIX):
width = arguments[0]
height = arguments[1]
val = 0
if len(arguments) == 3:
val = arguments[2]
MatrixUtils.initialize(height, width, WORKINGMATRIX, val)
def validate(this, arguments, WORKINGMATRIX):
if not super().validate(arguments, WORKINGMATRIX):
return False
if arguments[0] <= 0:
print("ERROR: width must be a positive integer")
return False
if arguments[1] <= 0:
print("ERROR: height must be a positive integer")
return False
return True
| {
"content_hash": "b76c2e9a1050f529e2f8a46a5e26f54d",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 80,
"avg_line_length": 32.523809523809526,
"alnum_prop": 0.5724743777452416,
"repo_name": "charlesdaniels/hercm",
"id": "52232eb02ac7df5ae879d39aea0fcd1110a42d79",
"size": "1366",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/python33/menuPlugins/init.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "8577"
},
{
"name": "Makefile",
"bytes": "139"
},
{
"name": "Python",
"bytes": "113163"
}
],
"symlink_target": ""
} |
"""
"""
import cv2
import numpy as np
from argparse import ArgumentParser
from simplejson import load
from os import system
from datetime import datetime, date, timedelta
try:
from urllib.request import Request, urlopen # Python 3
except ImportError:
from urllib2 import Request, urlopen # Python 2
__cloudcover__ = 15
__longitude__ = -70.881337
__latitude__ = 42.421546
__today__ = date.today()
__end_date__ = __today__.isoformat()
__start_date__ = (__today__ - timedelta(weeks=52)).isoformat()
__picker__ = 'date'
__out_file__ = 'out'
def parse_arguments(args=None):
"""
Parse command-line arguments.
"""
assert args is None or isinstance(args, list)
# Parse command-line arguments
parser = ArgumentParser(
description="Fetch SkyWatch data for a particular target."
)
default_cloudcover = __cloudcover__
parser.add_argument(
'--cloudcover', '-c', default=default_cloudcover,
nargs='?', type=int, const=default_cloudcover,
help='The maximum allowed percentage of cloud cover. ' +
'By default this is {}'.format(default_cloudcover)
)
default_longitude = __longitude__
parser.add_argument(
'--longitude', '-x', default=default_longitude,
nargs='?', type=float, const=default_longitude,
help='The target longitude. ' +
'By default this is {}'.format(default_longitude)
)
default_latitude = __latitude__
parser.add_argument(
'--latitude', '-y', default=default_latitude,
nargs='?', type=float, const=default_latitude,
help='The target latitude. ' +
'By default this is {}'.format(default_latitude)
)
default_start_date = __start_date__
parser.add_argument(
'--startdate', '-s', default=default_start_date,
nargs='?', const=default_start_date,
help='The search date range start. ' +
'By default this is "{}"'.format(default_start_date)
)
default_end_date = __end_date__
parser.add_argument(
'--enddate', '-e', default=default_end_date,
nargs='?', const=default_end_date,
help='The search date range end. ' +
'By default this is "{}"'.format(default_end_date)
)
parser.add_argument(
'--apikey', '-a', required=True, nargs='?',
help='The SkyWatch API key. There is no default.'
)
default_picker = __picker__
parser.add_argument(
'--picker', '-p', default=default_picker,
nargs='?', const=default_picker,
choices=["date", "resolution", "cloudcover"],
help='The priority field for picking data; ' +
'it can be "date", "resolution", or "cloudcover". ' +
'By default this is "{}"'.format(default_picker)
)
default_output_file = __out_file__
parser.add_argument(
'--outfile', '-o', default=default_output_file,
nargs='?', const=default_output_file,
help='The output file name. ' +
'By default this is "{}" '.format(default_output_file) +
'and the extension ".jp2" will be automatically provided.'
)
parser.add_argument(
'--verbose', '-v', action='store_true', default=True,
help='Make output more verbose, useful for debugging.'
)
return parser.parse_args(args)
def compare_for_larger(num1, num2):
"""
Compare numbers favoring bigger.
"""
return num1 > num2
def compare_for_smaller(num1, num2):
"""
Compare numbers favoring smaller.
"""
return num1 < num2
def extract_first_date(date_range_str):
"""
Pull the first date out of a SkyWatch data record.
"""
datetime_format = "%Y-%m-%dT%H:%M:%S.%f+00:00"
return datetime.strptime(date_range_str[1:33], datetime_format)
def compare_first_dates(date1str, date2str):
"""
Compare first dates for two SkyWatch data records.
"""
return compare_for_larger(extract_first_date(date1str['time']),
extract_first_date(date2str['time']))
def metadata_fetch(latitude, longitude,
start_date, end_date,
cloudcover, info_headers):
"""
Fetches the image metadata from SkyWatch.
"""
info_url = "https://api.skywatch.co/data/time/{0},{1}".format(start_date,
end_date) + \
"/location/{0},{1}/cloudcover/{2}".format(longitude, latitude,
cloudcover) + \
"/band/true-colour-image"
request = Request(info_url)
for header_name, header_data in info_headers.items():
request.add_header(header_name, header_data)
response = urlopen(request)
if response.code == 200:
info = load(response)
else:
print("Failed to download image metadata ({0}): {1}".format(
response.code, response.reason))
info = []
return info
def image_fetch(image_metadata, info_headers, verbose=False):
"""
Fetches the image itself from SkyWatch.
"""
if verbose:
print("Loading {0} byte image ({1} meter resolution)...".format(
image_metadata['size'], image_metadata['resolution']))
request = Request(image_metadata["download_path"])
for header_name, header_data in info_headers.items():
request.add_header(header_name, header_data)
response = urlopen(request)
if response.code == 200:
image_data = response.read()
else:
image_data = None
print("Failed to download image ({0}): {1}".format(
response.code, response.reason))
return image_data
def choose_and_fetch_image():
"""
Figures out the best available image to get and gets it.
"""
pickers = {
'date': compare_first_dates,
'resolution': compare_for_larger,
'cloudcover': compare_for_smaller
}
args = parse_arguments()
if args.verbose:
print("Will be searching from {0} to {1} ".format(args.startdate,
args.enddate) +
"for location ({0}, {1}) ".format(args.latitude,
args.longitude) +
"with maximum {0}% cloud cover ".format(args.cloudcover) +
"(favoring {0}).".format(args.picker))
info_headers = {
"x-api-key": args.apikey
}
metadata = metadata_fetch(args.latitude, args.longitude,
args.startdate, args.enddate,
args.cloudcover, info_headers)
picker_function = pickers[args.picker]
if len(metadata) > 0:
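        # Note: passing a bare comparison function to list.sort() is
        # Python-2-only behaviour; under Python 3 this call would need
        # functools.cmp_to_key (or a key function) instead.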
metadata.sort(picker_function)
chosen_image = image_fetch(metadata[0], info_headers, args.verbose)
# Store the image
if args.verbose:
"Saving file..."
try:
with open(args.outfile + '.jp2', 'wb') as output_image_file:
output_image_file.write(chosen_image)
except IOError as err:
print("Trouble saving file: {0}".format(str(err)))
filename = None
        else:
            filename = args.outfile
else:
filename = None
print("No matching image found.")
if args.verbose:
"Done."
return filename
def process_image(image_file_name):
"""
    Apply computer vision techniques to the image.
"""
# Here we're using a command-line utility to convert from
# the satellite JPEG 2000 + wavelet format to PPM, something
# which all OpenCV installations can process.
# On a Linux system, installing libopenjp2-tools will satisfy
# this dependency.
# Here we can use the SURF algorithm to pick out features.
# Note that you'll need to have a version of OpenCV with the
# contrib section installed; using the opencv-contrib-python
# package satisfies this dependency.
try:
system("opj_decompress -i {0}.jp2 -o {0}.ppm >> /dev/null".format(
image_file_name))
image_data = cv2.imread(image_file_name + '.ppm', 0)
surf = cv2.xfeatures2d.SURF_create()
(kps, descs) = surf.detectAndCompute(image_data, None)
print("# kps: {0}, descriptors: {1}".format(len(kps), descs.shape))
feature_image = cv2.drawKeypoints(image_data, kps,
None, (255, 0, 0), 4)
cv2.imshow("satellite image", feature_image)
cv2.waitKey(0)
cv2.destroyAllWindows()
except Exception as err:
print("Unable to use feature detection: {0}".format(err))
return False
return True
# Things to do when this module is directly run.
if __name__ == '__main__':
image_file_name = choose_and_fetch_image()
if image_file_name:
is_interesting = process_image(image_file_name)
| {
"content_hash": "392ac3381fbdf14e8ad19f2ccfa6a2c0",
"timestamp": "",
"source": "github",
"line_count": 252,
"max_line_length": 79,
"avg_line_length": 34.96825396825397,
"alnum_prop": 0.5907852927825692,
"repo_name": "hackforthesea/SatelitoVido",
"id": "66cf2728e7d490d781e3ea6b68774173bf30f74c",
"size": "8858",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "SatelitoVido.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "8858"
}
],
"symlink_target": ""
} |
"""
GDAL - Constant definitions
"""
from ctypes import (
c_double, c_float, c_int16, c_int32, c_ubyte, c_uint16, c_uint32,
)
# See https://gdal.org/api/raster_c_api.html#_CPPv412GDALDataType
GDAL_PIXEL_TYPES = {
0: 'GDT_Unknown', # Unknown or unspecified type
1: 'GDT_Byte', # Eight bit unsigned integer
2: 'GDT_UInt16', # Sixteen bit unsigned integer
3: 'GDT_Int16', # Sixteen bit signed integer
4: 'GDT_UInt32', # Thirty-two bit unsigned integer
5: 'GDT_Int32', # Thirty-two bit signed integer
6: 'GDT_Float32', # Thirty-two bit floating point
7: 'GDT_Float64', # Sixty-four bit floating point
8: 'GDT_CInt16', # Complex Int16
9: 'GDT_CInt32', # Complex Int32
10: 'GDT_CFloat32', # Complex Float32
11: 'GDT_CFloat64', # Complex Float64
}
# A list of gdal datatypes that are integers.
GDAL_INTEGER_TYPES = [1, 2, 3, 4, 5]
# Lookup values to convert GDAL pixel type indices into ctypes objects.
# The GDAL band-io works with ctypes arrays to hold data to be written
# or to hold the space for data to be read into. The lookup below helps
# selecting the right ctypes object for a given gdal pixel type.
GDAL_TO_CTYPES = [
None, c_ubyte, c_uint16, c_int16, c_uint32, c_int32,
c_float, c_double, None, None, None, None
]
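# Illustrative sketch (the helper name is invented and not part of the GDAL
# bindings): how the lookup above is typically used to allocate a ctypes
# buffer for reading or writing one band's worth of pixel data.
def _example_alloc_band_buffer(pixel_type, nr_pixels):
    """Return a zero-initialized ctypes array for the given GDAL pixel type."""
    ctype = GDAL_TO_CTYPES[pixel_type]
    if ctype is None:
        raise ValueError(
            'No ctypes mapping for %s' % GDAL_PIXEL_TYPES[pixel_type])
    return (ctype * nr_pixels)()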
# List of resampling algorithms that can be used to warp a GDALRaster.
GDAL_RESAMPLE_ALGORITHMS = {
'NearestNeighbour': 0,
'Bilinear': 1,
'Cubic': 2,
'CubicSpline': 3,
'Lanczos': 4,
'Average': 5,
'Mode': 6,
}
# See https://gdal.org/api/raster_c_api.html#_CPPv415GDALColorInterp
GDAL_COLOR_TYPES = {
0: 'GCI_Undefined', # Undefined, default value, i.e. not known
1: 'GCI_GrayIndex', # Greyscale
2: 'GCI_PaletteIndex', # Paletted
3: 'GCI_RedBand', # Red band of RGBA image
4: 'GCI_GreenBand', # Green band of RGBA image
5: 'GCI_BlueBand', # Blue band of RGBA image
6: 'GCI_AlphaBand', # Alpha (0=transparent, 255=opaque)
7: 'GCI_HueBand', # Hue band of HLS image
8: 'GCI_SaturationBand', # Saturation band of HLS image
9: 'GCI_LightnessBand', # Lightness band of HLS image
10: 'GCI_CyanBand', # Cyan band of CMYK image
11: 'GCI_MagentaBand', # Magenta band of CMYK image
12: 'GCI_YellowBand', # Yellow band of CMYK image
    13: 'GCI_BlackBand',  # Black band of CMYK image
14: 'GCI_YCbCr_YBand', # Y Luminance
15: 'GCI_YCbCr_CbBand', # Cb Chroma
16: 'GCI_YCbCr_CrBand', # Cr Chroma, also GCI_Max
}
# Fixed base path for buffer-based GDAL in-memory files.
VSI_FILESYSTEM_BASE_PATH = '/vsimem/'
# Should the memory file system take ownership of the buffer, freeing it when
# the file is deleted? (No, GDALRaster.__del__() will delete the buffer.)
VSI_TAKE_BUFFER_OWNERSHIP = False
# Should a VSI file be removed when retrieving its buffer?
VSI_DELETE_BUFFER_ON_READ = False
| {
"content_hash": "8cc40605638a70ddf347a2db9229452b",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 77,
"avg_line_length": 37.86842105263158,
"alnum_prop": 0.6678248783877693,
"repo_name": "koordinates/django",
"id": "2ac3872c98b616a86ec63fc6e79b6a73197ecfb0",
"size": "2878",
"binary": false,
"copies": "1",
"ref": "refs/heads/stable/3.2.x-kx",
"path": "django/contrib/gis/gdal/raster/const.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "84917"
},
{
"name": "HTML",
"bytes": "223820"
},
{
"name": "JavaScript",
"bytes": "139791"
},
{
"name": "Makefile",
"bytes": "125"
},
{
"name": "Procfile",
"bytes": "47"
},
{
"name": "Python",
"bytes": "14472067"
},
{
"name": "Shell",
"bytes": "809"
},
{
"name": "Smarty",
"bytes": "142"
}
],
"symlink_target": ""
} |
"""
WSGI config for WeddingAssist project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "WeddingAssist.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| {
"content_hash": "4d177c7da5c63d9ed4eb376185b57d23",
"timestamp": "",
"source": "github",
"line_count": 14,
"max_line_length": 78,
"avg_line_length": 28.642857142857142,
"alnum_prop": 0.7805486284289277,
"repo_name": "cjworld/WeddingAssist",
"id": "65c2c580cb827d1b59629651ef2568d0b6f233d2",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "WeddingAssist/wsgi.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "2604"
},
{
"name": "JavaScript",
"bytes": "12441"
},
{
"name": "Python",
"bytes": "25173"
}
],
"symlink_target": ""
} |
"""
Slider Cutoff
=============
This example shows how to bind a variable parameter to a slider, and how to use the corresponding bound value to color data points. This example is based on an example from the Altair 4 documentation for Interactions, in which the interactivity was accomplished using a selection. The version below has been simplified significantly through the use of a variable parameter. Variable parameters were added in Altair 5.
"""
# category: interactive charts
import altair as alt
import pandas as pd
import numpy as np
rand = np.random.RandomState(42)
df = pd.DataFrame({
'xval': range(100),
'yval': rand.randn(100).cumsum()
})
slider = alt.binding_range(min=0, max=100, step=1)
cutoff = alt.param(bind=slider, value=50)
alt.Chart(df).mark_point().encode(
x='xval',
y='yval',
color=alt.condition(
alt.datum.xval < cutoff,
alt.value('red'), alt.value('blue')
)
).add_params(
cutoff
)
| {
"content_hash": "e4ff776c33da3f90bd820970bcb5d76b",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 421,
"avg_line_length": 32,
"alnum_prop": 0.7041666666666667,
"repo_name": "altair-viz/altair",
"id": "19db7749396cf26bfb33e41933058f92c0487bcd",
"size": "960",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "altair/examples/slider_cutoff.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "343"
},
{
"name": "Python",
"bytes": "5377805"
},
{
"name": "TeX",
"bytes": "2684"
}
],
"symlink_target": ""
} |
from oslo_log import log as logging
from heat.common import exception
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources import signal_responder
from heat.engine import support
from heat.scaling import scalingutil as sc_util
LOG = logging.getLogger(__name__)
class AutoScalingPolicy(signal_responder.SignalResponder):
"""A resource to manage scaling of `OS::Heat::AutoScalingGroup`.
**Note** while it may incidentally support
`AWS::AutoScaling::AutoScalingGroup` for now, please don't use it for that
purpose and use `AWS::AutoScaling::ScalingPolicy` instead.
Resource to manage scaling for `OS::Heat::AutoScalingGroup`, i.e. define
which metric should be scaled and scaling adjustment, set cooldown etc.
"""
PROPERTIES = (
AUTO_SCALING_GROUP_NAME, SCALING_ADJUSTMENT, ADJUSTMENT_TYPE,
COOLDOWN, MIN_ADJUSTMENT_STEP
) = (
'auto_scaling_group_id', 'scaling_adjustment', 'adjustment_type',
'cooldown', 'min_adjustment_step',
)
ATTRIBUTES = (
ALARM_URL, SIGNAL_URL
) = (
'alarm_url', 'signal_url'
)
properties_schema = {
# TODO(Qiming): property name should be AUTO_SCALING_GROUP_ID
AUTO_SCALING_GROUP_NAME: properties.Schema(
properties.Schema.STRING,
_('AutoScaling group ID to apply policy to.'),
required=True
),
SCALING_ADJUSTMENT: properties.Schema(
properties.Schema.NUMBER,
_('Size of adjustment.'),
required=True,
update_allowed=True
),
ADJUSTMENT_TYPE: properties.Schema(
properties.Schema.STRING,
_('Type of adjustment (absolute or percentage).'),
required=True,
constraints=[
constraints.AllowedValues(
[sc_util.CHANGE_IN_CAPACITY,
sc_util.EXACT_CAPACITY,
sc_util.PERCENT_CHANGE_IN_CAPACITY]),
],
update_allowed=True
),
COOLDOWN: properties.Schema(
properties.Schema.NUMBER,
_('Cooldown period, in seconds.'),
update_allowed=True
),
MIN_ADJUSTMENT_STEP: properties.Schema(
properties.Schema.INTEGER,
_('Minimum number of resources that are added or removed '
'when the AutoScaling group scales up or down. This can '
'be used only when specifying percent_change_in_capacity '
'for the adjustment_type property.'),
constraints=[
constraints.Range(
min=0,
),
],
update_allowed=True
),
}
attributes_schema = {
ALARM_URL: attributes.Schema(
_("A signed url to handle the alarm."),
type=attributes.Schema.STRING,
cache_mode=attributes.Schema.CACHE_NONE
),
SIGNAL_URL: attributes.Schema(
_("A url to handle the alarm using native API."),
support_status=support.SupportStatus(version='5.0.0'),
type=attributes.Schema.STRING
),
}
def validate(self):
"""Add validation for min_adjustment_step."""
super(AutoScalingPolicy, self).validate()
self._validate_min_adjustment_step()
def _validate_min_adjustment_step(self):
adjustment_type = self.properties.get(self.ADJUSTMENT_TYPE)
adjustment_step = self.properties.get(self.MIN_ADJUSTMENT_STEP)
if (adjustment_type != sc_util.PERCENT_CHANGE_IN_CAPACITY
and adjustment_step is not None):
raise exception.ResourcePropertyValueDependency(
prop1=self.MIN_ADJUSTMENT_STEP,
prop2=self.ADJUSTMENT_TYPE,
value=sc_util.PERCENT_CHANGE_IN_CAPACITY)
def handle_create(self):
super(AutoScalingPolicy, self).handle_create()
self.resource_id_set(self._get_user_id())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Updates self.properties, if Properties has changed.
If Properties has changed, update self.properties, so we get the new
values during any subsequent adjustment.
"""
if prop_diff:
self.properties = json_snippet.properties(self.properties_schema,
self.context)
def handle_signal(self, details=None):
# Template author can use scaling policy with any of the actions
# of an alarm (i.e alarm_actions, insufficient_data_actions) and
# it would be actioned irrespective of the alarm state. It's
# fair to assume that the alarm state would be the appropriate one.
# The responsibility of using a scaling policy with desired actions
# lies with the template author, though this is normally expected to
# be used with 'alarm_actions'.
#
# We also assume that the alarm state is 'alarm' when 'details' is None
# or no 'current'/'state' key in 'details'. Watchrule has upper case
# states, so we lower() them. This is only used for logging the alarm
# state.
if details is None:
alarm_state = 'alarm'
else:
alarm_state = details.get('current',
details.get('state', 'alarm')).lower()
LOG.info('Alarm %(name)s, new state %(state)s',
{'name': self.name, 'state': alarm_state})
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
group = self.stack.resource_by_refid(asgn_id)
if group is None:
raise exception.NotFound(_('Alarm %(alarm)s could not find '
'scaling group named "%(group)s"'
) % {'alarm': self.name,
'group': asgn_id})
LOG.info('%(name)s alarm, adjusting group %(group)s with id '
'%(asgn_id)s by %(filter)s',
{'name': self.name, 'group': group.name,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]})
with group.frozen_properties():
group.adjust(
self.properties[self.SCALING_ADJUSTMENT],
self.properties[self.ADJUSTMENT_TYPE],
self.properties[self.MIN_ADJUSTMENT_STEP],
self.properties[self.COOLDOWN])
def _resolve_attribute(self, name):
if self.resource_id is None:
return
if name == self.ALARM_URL:
return str(self._get_ec2_signed_url(never_expire=True))
elif name == self.SIGNAL_URL:
return str(self._get_heat_signal_url())
def resource_mapping():
return {
'OS::Heat::ScalingPolicy': AutoScalingPolicy,
}
| {
"content_hash": "90854fb218d381c45dcfb87feea396f2",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 79,
"avg_line_length": 38.22282608695652,
"alnum_prop": 0.586378501350775,
"repo_name": "openstack/heat",
"id": "8ca88275f33f9306c0110c9561d7ffda4b161484",
"size": "7608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "heat/engine/resources/openstack/heat/scaling_policy.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "9145593"
},
{
"name": "Shell",
"bytes": "65832"
}
],
"symlink_target": ""
} |
"""Provide methods to bootstrap a Home Assistant instance."""
import asyncio
import logging
import logging.handlers
import os
import sys
from time import time
from collections import OrderedDict
from typing import Any, Optional, Dict, Set
import voluptuous as vol
from homeassistant import core, config as conf_util, config_entries, loader
from homeassistant.const import EVENT_HOMEASSISTANT_CLOSE
from homeassistant.setup import async_setup_component
from homeassistant.util.logging import AsyncHandler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
from homeassistant.exceptions import HomeAssistantError
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
DEBUGGER_INTEGRATIONS = {"ptvsd"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {"logger", "system_log"}
STAGE_1_INTEGRATIONS = {
# To record data
"recorder",
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
}
async def async_from_config_dict(
config: Dict[str, Any],
hass: core.HomeAssistant,
config_dir: Optional[str] = None,
enable_log: bool = True,
verbose: bool = False,
skip_pip: bool = False,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False,
) -> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
start = time()
if enable_log:
async_enable_logging(hass, verbose, log_rotate_days, log_file, log_no_color)
hass.config.skip_pip = skip_pip
if skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. " "This may cause issues"
)
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
# Make a copy because we are mutating it.
config = OrderedDict(config)
# Merge packages
await conf_util.merge_packages_config(
hass, config, core_config.get(conf_util.CONF_PACKAGES, {})
)
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
await _async_set_up_integrations(hass, config)
stop = time()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if sys.version_info[:3] < (3, 7, 0):
msg = (
"Python 3.6 support is deprecated and will "
"be removed in the first release after December 15, 2019. Please "
"upgrade Python to 3.7.0 or higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
async def async_from_config_file(
config_path: str,
hass: core.HomeAssistant,
verbose: bool = False,
skip_pip: bool = True,
log_rotate_days: Any = None,
log_file: Any = None,
log_no_color: bool = False,
) -> Optional[core.HomeAssistant]:
"""Read the configuration file and try to start all the functionality.
Will add functionality to 'hass' parameter.
This method is a coroutine.
"""
# Set config dir to directory holding config file
config_dir = os.path.abspath(os.path.dirname(config_path))
hass.config.config_dir = config_dir
if not is_virtual_env():
await async_mount_local_lib_path(config_dir)
async_enable_logging(hass, verbose, log_rotate_days, log_file, log_no_color)
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await hass.async_add_executor_job(
conf_util.load_yaml_config_file, config_path
)
except HomeAssistantError as err:
_LOGGER.error("Error loading %s: %s", config_path, err)
return None
finally:
clear_secret_cache()
return await async_from_config_dict(
config_dict, hass, enable_log=False, skip_pip=skip_pip
)
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) " "[%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wraps the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
    # If the above initialization failed for any reason, set up the default
    # formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
async_handler = AsyncHandler(hass.loop, err_handler)
async def async_stop_async_handler(_: Any) -> None:
"""Cleanup async handler."""
logging.getLogger("").removeHandler(async_handler) # type: ignore
await async_handler.async_close(blocking=True)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_CLOSE, async_stop_async_handler)
logger = logging.getLogger("")
logger.addHandler(async_handler) # type: ignore
logger.setLevel(logging.INFO)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = set(key.split(" ")[0] for key in config.keys() if key != core.DOMAIN)
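    # e.g. "sensor" and "sensor 2" YAML keys both map to the single "sensor"
    # domain here, which is why only the part before the space is kept.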
# Add config entry domains
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: Dict[str, Any]
) -> None:
"""Set up all the integrations."""
domains = _get_domains(hass, config)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Starting up debuggers %s", debuggers)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in debuggers)
)
domains -= DEBUGGER_INTEGRATIONS
# Resolve all dependencies of all components so we can find the logging
# and integrations that need faster initialization.
resolved_domains_task = asyncio.gather(
*(loader.async_component_dependencies(hass, domain) for domain in domains),
return_exceptions=True,
)
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return
_LOGGER.debug("Home Assistant core initialized")
# Finish resolving domains
for dep_domains in await resolved_domains_task:
# Result is either a set or an exception. We ignore exceptions
# It will be properly handled during setup of the domain.
if isinstance(dep_domains, set):
domains.update(dep_domains)
# setup components
logging_domains = domains & LOGGING_INTEGRATIONS
stage_1_domains = domains & STAGE_1_INTEGRATIONS
stage_2_domains = domains - logging_domains - stage_1_domains
if logging_domains:
_LOGGER.info("Setting up %s", logging_domains)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in logging_domains)
)
# Kick off loading the registries. They don't need to be awaited.
asyncio.gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
hass.helpers.area_registry.async_get_registry(),
)
if stage_1_domains:
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in stage_1_domains)
)
# Load all integrations
after_dependencies: Dict[str, Set[str]] = {}
for int_or_exc in await asyncio.gather(
*(loader.async_get_integration(hass, domain) for domain in stage_2_domains),
return_exceptions=True,
):
# Exceptions are handled in async_setup_component.
if isinstance(int_or_exc, loader.Integration) and int_or_exc.after_dependencies:
after_dependencies[int_or_exc.domain] = set(int_or_exc.after_dependencies)
last_load = None
while stage_2_domains:
domains_to_load = set()
for domain in stage_2_domains:
after_deps = after_dependencies.get(domain)
# Load if integration has no after_dependencies or they are
# all loaded
if not after_deps or not after_deps - hass.config.components:
domains_to_load.add(domain)
if not domains_to_load or domains_to_load == last_load:
break
_LOGGER.debug("Setting up %s", domains_to_load)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in domains_to_load)
)
last_load = domains_to_load
stage_2_domains -= domains_to_load
# These are stage 2 domains that never have their after_dependencies
# satisfied.
if stage_2_domains:
_LOGGER.debug("Final set up: %s", stage_2_domains)
await asyncio.gather(
*(async_setup_component(hass, domain, config) for domain in stage_2_domains)
)
# Wrap up startup
await hass.async_block_till_done()
| {
"content_hash": "d37e876bbc54187bd65605031bd9555e",
"timestamp": "",
"source": "github",
"line_count": 390,
"max_line_length": 89,
"avg_line_length": 33.5,
"alnum_prop": 0.6421737466513586,
"repo_name": "qedi-r/home-assistant",
"id": "312c739cd7203494150ce4a60f8f18c2190ed6d2",
"size": "13065",
"binary": false,
"copies": "2",
"ref": "refs/heads/dev",
"path": "homeassistant/bootstrap.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "18564720"
},
{
"name": "Shell",
"bytes": "6846"
}
],
"symlink_target": ""
} |
from __future__ import division, print_function, absolute_import
import numpy as np
class LeastSquaresMixin(object):
"""Mixin for matrix-based Least Squares periodic analysis"""
def _construct_X(self, omega, weighted=True, **kwargs):
raise NotImplementedError()
def _construct_y(self, weighted=True, **kwargs):
raise NotImplementedError()
def _construct_X_M(self, omega, **kwargs):
"""Construct the weighted normal matrix of the problem"""
X = self._construct_X(omega, weighted=True, **kwargs)
M = np.dot(X.T, X)
if hasattr(self, 'regularization') and self.regularization is not None:
diag = M.ravel(order='K')[::M.shape[0] + 1]
if self.regularize_by_trace:
diag += diag.sum() * np.asarray(self.regularization)
else:
diag += np.asarray(self.regularization)
return X, M
def _compute_ymean(self, **kwargs):
"""Compute the (weighted) mean of the y data"""
y = np.asarray(kwargs.get('y', self.y))
dy = np.asarray(kwargs.get('dy', self.dy))
if dy.size == 1:
return np.mean(y)
else:
return np.average(y, weights=1 / dy ** 2)
def _construct_y(self, weighted=True, **kwargs):
y = kwargs.get('y', self.y)
dy = kwargs.get('dy', self.dy)
center_data = kwargs.get('center_data', self.center_data)
y = np.asarray(y)
dy = np.asarray(dy)
if center_data:
y = y - self._compute_ymean(y=y, dy=dy)
if weighted:
return y / dy
else:
return y
def _best_params(self, omega):
Xw, XTX = self._construct_X_M(omega)
XTy = np.dot(Xw.T, self.yw_)
return np.linalg.solve(XTX, XTy)
def _score(self, periods):
omegas = 2 * np.pi / periods
# Set up the reference chi2. Note that this entire function would
# be much simpler if we did not allow center_data=False.
# We keep it just to make sure our math is correct
chi2_0 = np.dot(self.yw_.T, self.yw_)
if self.center_data:
chi2_ref = chi2_0
else:
yref = self._construct_y(weighted=True, center_data=True)
chi2_ref = np.dot(yref.T, yref)
chi2_0_minus_chi2 = np.zeros(omegas.size, dtype=float)
# Iterate through the omegas and compute the power for each
for i, omega in enumerate(omegas.flat):
Xw, XTX = self._construct_X_M(omega)
XTy = np.dot(Xw.T, self.yw_)
chi2_0_minus_chi2[i] = np.dot(XTy.T, np.linalg.solve(XTX, XTy))
# construct and return the power from the chi2 difference
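        # Equivalently, for centered data the returned power is
        #   P(omega) = [chi2_ref - chi2(omega)] / chi2_ref,
        # i.e. the fractional reduction in chi-square achieved by the periodic
        # model at omega relative to the constant (weighted-mean) model.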
if self.center_data:
P = chi2_0_minus_chi2 / chi2_ref
else:
P = 1 + (chi2_0_minus_chi2 - chi2_0) / chi2_ref
return P
| {
"content_hash": "319ae4d14788f750ad04d5126614ef88",
"timestamp": "",
"source": "github",
"line_count": 85,
"max_line_length": 79,
"avg_line_length": 33.94117647058823,
"alnum_prop": 0.5691507798960138,
"repo_name": "nhuntwalker/gatspy",
"id": "6fca0b4bab5f9b896b0167cd85eabddaec9c6f0b",
"size": "2885",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gatspy/periodic/_least_squares_mixin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Makefile",
"bytes": "217"
},
{
"name": "Python",
"bytes": "114268"
}
],
"symlink_target": ""
} |
"""MyOffers URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.9/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^polls/$', views.polls_view, name='polls_view'),
url(r'^petitions/$', views.petitions_view, name='petitions_view'),
url(r'^new-post/$', views.new_post_view, name='new_post_view'),
url(r'^create-post/$', views.create_post, name='create_post'),
url(r'^update-post/$', views.update_post, name='update_post'),
url(r'^delete-post/$', views.delete_post, name='delete_post'),
url(r'^post-reaction/$', views.post_reaction, name='post_reaction'),
url(r'^post-comment/$', views.create_post_comment, name='create_post_comment'),
url(r'^post-comment-reaction/$', views.post_comment_reaction, name='post_comment_reaction'),
url(r'^reply-comment/$', views.create_reply_comment, name='create_reply_comment'),
url(r'^reply-comment-reaction/$', views.reply_comment_reaction, name='reply_comment_reaction'),
url(r'^load-post-comments/$', views.load_post_comments, name='load_post_comments'),
url(r'^load-reply-comments/$', views.load_reply_comments, name='load_reply_comments'),
url(r'^poll-response/$', views.poll_response, name='poll_response'),
]
| {
"content_hash": "8a23fe22be3769ef27a8449db77ff89a",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 96,
"avg_line_length": 51.205882352941174,
"alnum_prop": 0.7122343480758185,
"repo_name": "amitdhiman000/dais",
"id": "98e71251763436abd354db84e11339abb0560598",
"size": "1741",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "post/urls.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3340"
},
{
"name": "HTML",
"bytes": "101233"
},
{
"name": "JavaScript",
"bytes": "22466"
},
{
"name": "Python",
"bytes": "94519"
}
],
"symlink_target": ""
} |
from .environment import Environment
from .learner import EpisodicLearnerMixin
from .value_function import ValueFunction
from .mdp import MDP, FixedGameMDP, GameMDP
| {
"content_hash": "2e4579b502f76d6a5381e083cc78e85f",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 43,
"avg_line_length": 41.25,
"alnum_prop": 0.8484848484848485,
"repo_name": "davidrobles/mlnd-capstone-code",
"id": "1a7e653b6a58a21c90478457766c2ab3ea966a0f",
"size": "165",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "capstone/rl/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "266"
},
{
"name": "Python",
"bytes": "150850"
}
],
"symlink_target": ""
} |
import argparse
import tables as tb
import pylab as pl
from psi.controller.calibration import util
if __name__ == '__main__':
desc = 'Convert to speaker calibration'
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('filename', type=str, help='filename')
parser.add_argument('smoothing', type=float)
args = parser.parse_args()
with tb.open_file(args.filename, 'a') as fh:
fs = fh.root._v_attrs['fs']
output_gain = fh.root._v_attrs['output_gain']
mic_sens = fh.root._v_attrs['ref_mic_sens']
discard = fh.root._v_attrs['discard']
n = fh.root._v_attrs['n']
a = fh.root.a.read()
b = fh.root.b.read()
ar = fh.root.a_response.read()[discard:, 0]
br = fh.root.b_response.read()[discard:, 0]
freq, vrms, phase = util.golay_tf(a, b, ar, br, fs)
vrms = vrms.mean(axis=0)
phase = phase.mean(axis=0)
_, sig_vrms, _ = util.golay_tf(a, b, a, b, fs)
# actual output of speaker
spl = util.db(vrms)-util.db(mic_sens)-util.db(20e-6)
# correct speaker output so it represents gain of 0 and Vrms=1
norm_spl = spl-output_gain-util.db(sig_vrms)
# Calculate sensitivity of speaker as dB (Vrms/Pa)
sens = -norm_spl-util.db(20e-6)
sens_smoothed = util.freq_smooth(freq, sens, args.smoothing)
if 'smoothed_sensitivity' in fh.root:
fh.root.smoothed_sensitivity._f_remove()
if 'smoothed_phase' in fh.root:
fh.root.smoothed_phase._f_remove()
node = fh.create_array(fh.root, 'smoothed_sensitivity', sens_smoothed)
node._v_attrs['smoothing'] = args.smoothing
node = fh.create_array(fh.root, 'smoothed_phase', sens)
node._v_attrs['smoothing'] = args.smoothing
pl.semilogx(freq, sens, 'k-')
pl.semilogx(freq, sens_smoothed, 'r-', lw=2)
pl.show()
| {
"content_hash": "90e60c3aa012d7c13a10056d9add6f93",
"timestamp": "",
"source": "github",
"line_count": 54,
"max_line_length": 78,
"avg_line_length": 35.666666666666664,
"alnum_prop": 0.6002076843198338,
"repo_name": "bburan/psiexperiment",
"id": "76ca35032b8a3e7f0036802fac00394d330dc420",
"size": "1926",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "psi/controller/calibration/make_speaker_calibration.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "468917"
}
],
"symlink_target": ""
} |
'''
Master Reborn Add-on
Copyright (C) 2017 Master Reborn
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,unicodedata
def get(title):
if title == None: return
title = title.lower()
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
title = title.replace('"', '\"').replace('&', '&')
title = re.sub('\n|([[].+?[]])|([(].+?[)])|\s(vs|v[.])\s|(:|;|-|"|,|\'|\_|\.|\?)|\s', '', title).lower()
return title
def geturl(title):
if title == None: return
title = title.lower()
title = title.translate(None, ':*?"\'\.<>|&!,')
title = title.replace('/', '-')
title = title.replace(' ', '-')
title = title.replace('--', '-')
return title
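# Illustrative example (made-up input): geturl('Master Reborn: Add-on') yields
# 'master-reborn-add-on' after lowercasing, stripping punctuation, and mapping
# spaces to hyphens.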
def get_simple(title):
if title == None: return
title = title.lower()
title = re.sub('(\d{4})', '', title)
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
title = title.replace('"', '\"').replace('&', '&')
title = re.sub('\n|\(|\)|\[|\]|\{|\}|\s(vs|v[.])\s|(:|;|-|"|,|\'|\_|\.|\?)|\s', '', title).lower()
return title
def getsearch(title):
if title == None: return
title = title.lower()
title = re.sub('&#(\d+);', '', title)
title = re.sub('(&#[0-9]+)([^;^0-9]+)', '\\1;\\2', title)
title = title.replace('"', '\"').replace('&', '&')
title = re.sub('\\\|/|\(|\)|\[|\]|\{|\}|-|:|;|\*|\?|"|\'|<|>|\_|\.|\?', ' ', title).lower()
title = ' '.join(title.split())
return title
def query(title):
if title == None: return
title = title.replace('\'', '').rsplit(':', 1)[0]
return title
def normalize(title):
try:
try: return title.decode('ascii').encode("utf-8")
except: pass
return str( ''.join(c for c in unicodedata.normalize('NFKD', unicode( title.decode('utf-8') )) if unicodedata.category(c) != 'Mn') )
except:
return title
| {
"content_hash": "7a1421854ac5cfd16a0b0c4d57df4580",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 140,
"avg_line_length": 33.98684210526316,
"alnum_prop": 0.5439411536972513,
"repo_name": "TheWardoctor/Wardoctors-repo",
"id": "e9e99311403c09f1f573fac0dba94aeaf7d4b081",
"size": "2608",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "plugin.video.master.reborn/resources/lib/modules/cleantitle.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "3208"
},
{
"name": "JavaScript",
"bytes": "115722"
},
{
"name": "Python",
"bytes": "34405207"
},
{
"name": "Shell",
"bytes": "914"
}
],
"symlink_target": ""
} |
import logging
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from dojo.filters import TestTypeFilter
from dojo.forms import Test_TypeForm
from dojo.models import Test_Type
from dojo.utils import get_page_items, add_breadcrumb
logger = logging.getLogger(__name__)
"""
Jay
Status: in prod
Test Type views
"""
@user_passes_test(lambda u: u.is_staff)
def test_type(request):
initial_queryset = Test_Type.objects.all().order_by('name')
name_words = [tt.name for tt in
initial_queryset]
test_types = TestTypeFilter(request.GET, queryset=initial_queryset)
tts = get_page_items(request, test_types.qs, 25)
add_breadcrumb(title="Test Type List", top_level=True, request=request)
return render(request, 'dojo/test_type.html', {
'name': 'Test Type List',
'metric': False,
'user': request.user,
'tts': tts,
'test_types': test_types,
'name_words': name_words})
@user_passes_test(lambda u: u.is_staff)
def add_test_type(request):
form = Test_TypeForm()
if request.method == 'POST':
form = Test_TypeForm(request.POST)
if form.is_valid():
form.save()
messages.add_message(request,
messages.SUCCESS,
'Test type added successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('test_type'))
add_breadcrumb(title="Add Test Type", top_level=False, request=request)
return render(request, 'dojo/new_test_type.html', {
'name': 'Add Test Type',
'metric': False,
'user': request.user,
'form': form,
})
@user_passes_test(lambda u: u.is_staff)
def edit_test_type(request, ptid):
tt = get_object_or_404(Test_Type, pk=ptid)
form = Test_TypeForm(instance=tt)
if request.method == 'POST':
form = Test_TypeForm(request.POST, instance=tt)
if form.is_valid():
tt = form.save()
messages.add_message(request,
messages.SUCCESS,
'Test type updated successfully.',
extra_tags='alert-success')
return HttpResponseRedirect(reverse('test_type'))
add_breadcrumb(title="Edit Test Type", top_level=False, request=request)
return render(request, 'dojo/edit_test_type.html', {
'name': 'Edit Test Type',
'metric': False,
'user': request.user,
'form': form,
'pt': tt})
| {
"content_hash": "4f265e580cee8f258efcad4c1dacda76",
"timestamp": "",
"source": "github",
"line_count": 80,
"max_line_length": 76,
"avg_line_length": 34.225,
"alnum_prop": 0.6081081081081081,
"repo_name": "OWASP/django-DefectDojo",
"id": "cc87333f5fc264cbf06700d6de7141555d2d3028",
"size": "2753",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "dojo/test_type/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "18512"
},
{
"name": "HTML",
"bytes": "848751"
},
{
"name": "JavaScript",
"bytes": "6717"
},
{
"name": "Python",
"bytes": "869791"
},
{
"name": "Ruby",
"bytes": "998"
},
{
"name": "Shell",
"bytes": "30386"
},
{
"name": "Smarty",
"bytes": "3485"
},
{
"name": "XSLT",
"bytes": "6624"
}
],
"symlink_target": ""
} |
import uuid
import unittest
try:
from unittest import mock
except ImportError:
# noinspection PyUnresolvedReferences
import mock
# As these are unit tests, we do not need the full broker capability,
# we can thus mock the DbFactory in order for it not to try and open
# the database (which is not required anyway)
with mock.patch('aquilon.aqdb.db_factory.DbFactory', autospec=True):
from aquilon.worker.templates import domain
class TestTemplateDomain(unittest.TestCase):
@staticmethod
@mock.patch.object(domain.TemplateDomain, '__init__')
def get_instance(mock_init):
mock_init.return_value = None
instance = domain.TemplateDomain('domain')
instance.domain = mock.Mock()
instance.domain.name = 'domain-name'
instance.logger = mock.Mock()
instance.author = 'author'
return instance
@mock.patch.object(domain.TemplateDomain, '_compute_formats_and_suffixes')
@mock.patch.object(domain.TemplateDomain, '_preprocess_only')
@mock.patch.object(domain.TemplateDomain, '_prepare_dirs')
def test_compile_passes_correct_exclude_and_include_to_panc(
self, mock_pd, mock_po, mock_cfas):
# This test is to ensure that correct values of panc_debug_include
# and panc_debug_exclude are used to compute and pass arguments to
# the panc compiler (run via aquilon.worker.processes.run_command()).
expected_exclude = str(uuid.uuid1())
expected_exclude_option = '-Dpanc.debug.exclude={}'.format(
expected_exclude)
expected_include = str(uuid.uuid1())
expected_include_option = '-Dpanc.debug.include={}'.format(
expected_include)
mock_pd.return_value = 'outputdir', 'templatedir'
mock_po.return_value = 'only', False # nothing_to_do must be False
mock_cfas.return_value = [], []
template_domain = self.get_instance()
patcher = mock.patch.object(domain, 'run_command')
mock_rc = patcher.start()
self.assertEqual(mock_rc.call_count, 0)
# Both exclude and include should be passed.
template_domain.compile('session',
panc_debug_include=expected_include,
panc_debug_exclude=expected_exclude)
self.assertEqual(mock_rc.call_count, 1)
self.assertIn(expected_exclude_option, mock_rc.call_args_list[0][0][0])
self.assertIn(expected_include_option, mock_rc.call_args_list[0][0][0])
# Exclude should be passed, include should not be added to panc args.
template_domain.compile('session',
panc_debug_exclude=expected_exclude)
self.assertEqual(mock_rc.call_count, 2)
self.assertIn(expected_exclude_option, mock_rc.call_args_list[1][0][0])
for o in mock_rc.call_args_list[1][0][0]:
self.assertIsNot(o.startswith('-Dpanc.debug.include'), True)
# Include should be passed, exclude should not be added to panc args.
template_domain.compile('session',
panc_debug_include=expected_include)
self.assertEqual(mock_rc.call_count, 3)
self.assertIn(expected_include_option, mock_rc.call_args_list[2][0][0])
for o in mock_rc.call_args_list[2][0][0]:
self.assertIsNot(o.startswith('-Dpanc.debug.exclude'), True)
patcher.stop()
| {
"content_hash": "890a0c6e7c581956c85d58d9646697b1",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 79,
"avg_line_length": 46.794520547945204,
"alnum_prop": 0.6487119437939111,
"repo_name": "quattor/aquilon",
"id": "682fff28a20d8d5b6e6de1267418dcd248f2149e",
"size": "4099",
"binary": false,
"copies": "1",
"ref": "refs/heads/upstream",
"path": "tests/unit/lib/aquilon/worker/templates/test_domain.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "DIGITAL Command Language",
"bytes": "1823"
},
{
"name": "Makefile",
"bytes": "5732"
},
{
"name": "Mako",
"bytes": "4178"
},
{
"name": "PLSQL",
"bytes": "102109"
},
{
"name": "PLpgSQL",
"bytes": "8091"
},
{
"name": "Pan",
"bytes": "1058"
},
{
"name": "Perl",
"bytes": "6057"
},
{
"name": "Python",
"bytes": "5884984"
},
{
"name": "SQLPL",
"bytes": "869"
},
{
"name": "Shell",
"bytes": "33547"
},
{
"name": "Smarty",
"bytes": "4603"
}
],
"symlink_target": ""
} |
import sys
sys.path.insert(1, "../../../")
import h2o, tests
def link_functions_tweedie_basic(ip,port):
print "Read in prostate data."
hdf = h2o.upload_file(h2o.locate("smalldata/prostate/prostate_complete.csv.zip"))
print "Testing for family: TWEEDIE"
print "Set variables for h2o."
y = "CAPSULE"
x = ["AGE","RACE","DCAPS","PSA","VOL","DPROS","GLEASON"]
print "Create models with canonical link: TWEEDIE"
model_h2o_tweedie = h2o.glm(x=hdf[x], y=hdf[y], family="tweedie", link="tweedie", alpha=[0.5], Lambda = [0])
print "Compare model deviances for link function tweedie (using precomputed values from R)"
deviance_h2o_tweedie = model_h2o_tweedie.residual_deviance() / model_h2o_tweedie.null_deviance()
assert 0.721452 - deviance_h2o_tweedie <= 0.01, "h2o's residual/null deviance is more than 0.01 lower than R's. h2o: " \
"{0}, r: {1}".format(deviance_h2o_tweedie, 0.721452)
if __name__ == "__main__":
tests.run_test(sys.argv, link_functions_tweedie_basic)
| {
"content_hash": "9a16e8ada585d77f7193ff0f391668ee",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 124,
"avg_line_length": 37.206896551724135,
"alnum_prop": 0.6320667284522706,
"repo_name": "bospetersen/h2o-3",
"id": "8b00ffeb5e5a11d6ea61d8fb74490e6e483f517a",
"size": "1079",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "h2o-py/tests/testdir_algos/glm/pyunit_link_functions_tweedie_basicGLM.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "5090"
},
{
"name": "CSS",
"bytes": "163561"
},
{
"name": "CoffeeScript",
"bytes": "262107"
},
{
"name": "Emacs Lisp",
"bytes": "8914"
},
{
"name": "Groovy",
"bytes": "78"
},
{
"name": "HTML",
"bytes": "146874"
},
{
"name": "Java",
"bytes": "5441396"
},
{
"name": "JavaScript",
"bytes": "88331"
},
{
"name": "Makefile",
"bytes": "31513"
},
{
"name": "Python",
"bytes": "2021301"
},
{
"name": "R",
"bytes": "1829960"
},
{
"name": "Rebol",
"bytes": "3997"
},
{
"name": "Ruby",
"bytes": "299"
},
{
"name": "Scala",
"bytes": "16336"
},
{
"name": "Shell",
"bytes": "44718"
},
{
"name": "TeX",
"bytes": "470617"
}
],
"symlink_target": ""
} |
import numpy
class BiExponentialParameters(object):
def __init__(self, tau1, tau2): # , amplitude, baseline):
assert (tau1 > 0)
assert (tau2 > 0)
# assert (tau2 > tau1)
self._tau1 = tau1
self._tau2 = tau2
self._amplitude = 1.
self._baseline = 0
# self._amplitude = amplitude
# self._baseline = baseline
@property
def tau1(self):
return self._tau1
@property
def tau2(self):
return self._tau2
@property
def amplitude(self):
return self._amplitude
@property
def baseline(self):
return self._baseline
# @tau1.setter
# def tau1(self, t):
# assert (t > 0)
# self._tau1 = t
#
# def tau2(self, t):
# assert (t > 0)
# self._tau2 = t
def kernel(self, x=None):
""" Create a kernel for the given parameters.
x may be a numpy array to be used as support.
tau1 is slow, tau2 is fast. i.e. tau1 >= tau2
        If x is None, the support is chosen automatically such that the difference
        between the last value of the kernel and the baseline is less than 1%."""
def biexp(x):
p = self.amplitude * (numpy.exp(-x / self.tau1) - numpy.exp(-x / self.tau2))
return p / numpy.max(p)
if x is None:
x = numpy.arange(10.)
p = biexp(x)
while p[-1] > 0.01:
x = numpy.arange(len(x) * 2)
p = biexp(x)
return p + self.baseline
else:
p = biexp(x)
if p[-1] > 0.01:
print("Warning: support for biexp may be to small.")
return p
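# Hedged usage sketch (not part of the original module): builds a kernel with the
# automatic support selection described in the docstring above. The tau values are
# arbitrary example numbers.
def _biexponential_demo():
    params = BiExponentialParameters(tau1=50.0, tau2=5.0)
    k = params.kernel()  # support grows until the normalized tail falls below 1%
    return len(k), float(k.max())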
| {
"content_hash": "2ec30d50d4e3ab017a7d56d8d067ed61",
"timestamp": "",
"source": "github",
"line_count": 63,
"max_line_length": 89,
"avg_line_length": 27.047619047619047,
"alnum_prop": 0.5134976525821596,
"repo_name": "samuroi/SamuROI",
"id": "bf12be1aca8c85a8f18338786900033a62a6dbd5",
"size": "1706",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "samuroi/event/biexponential.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "179563"
}
],
"symlink_target": ""
} |
"""Tools for working with groups
This provides several functions to work with groups and a Group class that
keeps track of the different representations and has methods to work more
easily with groups.
Author: Josef Perktold,
Author: Nathaniel Smith, recipe for sparse_dummies on scipy user mailing list
Created on Tue Nov 29 15:44:53 2011 : sparse_dummies
Created on Wed Nov 30 14:28:24 2011 : combine_indices
changes: add Group class
Notes
~~~~~
This reverses the class I used before, where the class was for the data and
the group was auxiliary. Here, it is only the group, no data is kept.
sparse_dummies needs checking for corner cases, e.g.
what if a category level has zero elements? This can happen with subset
selection even if the original groups were defined as arange.
Not all methods and options have been tried out yet after refactoring
need more efficient loop if groups are sorted -> see GroupSorted.group_iter
"""
from __future__ import print_function
from statsmodels.compat.python import lrange, lzip, range
import numpy as np
import pandas as pd
import statsmodels.tools.data as data_util
from pandas import Index, MultiIndex
def combine_indices(groups, prefix='', sep='.', return_labels=False):
"""use np.unique to get integer group indices for product, intersection
"""
if isinstance(groups, tuple):
groups = np.column_stack(groups)
else:
groups = np.asarray(groups)
dt = groups.dtype
is2d = (groups.ndim == 2) # need to store
if is2d:
ncols = groups.shape[1]
if not groups.flags.c_contiguous:
groups = np.array(groups, order='C')
groups_ = groups.view([('', groups.dtype)] * groups.shape[1])
else:
groups_ = groups
uni, uni_idx, uni_inv = np.unique(groups_, return_index=True,
return_inverse=True)
if is2d:
uni = uni.view(dt).reshape(-1, ncols)
# avoiding a view would be
# for t in uni.dtype.fields.values():
# assert (t[0] == dt)
#
# uni.dtype = dt
# uni.shape = (uni.size//ncols, ncols)
if return_labels:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
return uni_inv, uni_idx, uni, label
else:
return uni_inv, uni_idx, uni
# written for and used in try_covariance_grouploop.py
def group_sums(x, group, use_bincount=True):
"""simple bincount version, again
group : array, integer
assumed to be consecutive integers
no dtype checking because I want to raise in that case
uses loop over columns of x
for comparison, simple python loop
"""
x = np.asarray(x)
if x.ndim == 1:
x = x[:, None]
elif x.ndim > 2 and use_bincount:
raise ValueError('not implemented yet')
if use_bincount:
# re-label groups or bincount takes too much memory
if np.max(group) > 2 * x.shape[0]:
group = pd.factorize(group)[0]
return np.array([np.bincount(group, weights=x[:, col])
for col in range(x.shape[1])])
else:
uniques = np.unique(group)
result = np.zeros([len(uniques)] + list(x.shape[1:]))
for ii, cat in enumerate(uniques):
            result[ii] = x[group == cat].sum(0)
return result
def group_sums_dummy(x, group_dummy):
"""sum by groups given group dummy variable
group_dummy can be either ndarray or sparse matrix
"""
if data_util._is_using_ndarray_type(group_dummy, None):
return np.dot(x.T, group_dummy)
else: # check for sparse
return x.T * group_dummy
def dummy_sparse(groups):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,)
an array of group indicators for each observation. Group levels are
assumed to be defined as consecutive integers, i.e. range(n_groups)
where n_groups is the number of group levels. A group level with no
observations for it will still produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
    indi = sparse.csr_matrix((data, groups, indptr))
return indi
class Group(object):
def __init__(self, group, name=''):
        # needed by the dummy() and interaction() methods below
        self.group = np.asarray(group)  # TODO: use checks in combine_indices
self.name = name
uni, uni_idx, uni_inv = combine_indices(group)
# TODO: rename these to something easier to remember
self.group_int, self.uni_idx, self.uni = uni, uni_idx, uni_inv
self.n_groups = len(self.uni)
# put this here so they can be overwritten before calling labels
self.separator = '.'
self.prefix = self.name
if self.prefix:
self.prefix = self.prefix + '='
# cache decorator
def counts(self):
return np.bincount(self.group_int)
# cache_decorator
def labels(self):
# is this only needed for product of groups (intersection)?
prefix = self.prefix
uni = self.uni
sep = self.separator
if uni.ndim > 1:
label = [(prefix+sep.join(['%s']*len(uni[0]))) % tuple(ii)
for ii in uni]
else:
label = [prefix + '%s' % ii for ii in uni]
return label
def dummy(self, drop_idx=None, sparse=False, dtype=int):
"""
drop_idx is only available if sparse=False
drop_idx is supposed to index into uni
"""
uni = self.uni
if drop_idx is not None:
idx = lrange(len(uni))
del idx[drop_idx]
uni = uni[idx]
group = self.group
if not sparse:
return (group[:, None] == uni[None, :]).astype(dtype)
else:
return dummy_sparse(self.group_int)
def interaction(self, other):
if isinstance(other, self.__class__):
other = other.group
return self.__class__((self, other))
def group_sums(self, x, use_bincount=True):
return group_sums(x, self.group_int, use_bincount=use_bincount)
def group_demean(self, x, use_bincount=True):
nobs = float(len(x))
means_g = group_sums(x / nobs, self.group_int,
use_bincount=use_bincount)
x_demeaned = x - means_g[self.group_int] # check reverse_index?
return x_demeaned, means_g
class GroupSorted(Group):
def __init__(self, group, name=''):
super(self.__class__, self).__init__(group, name=name)
idx = (np.nonzero(np.diff(group))[0]+1).tolist()
self.groupidx = lzip([0] + idx, idx + [len(group)])
def group_iter(self):
for low, upp in self.groupidx:
yield slice(low, upp)
def lag_indices(self, lag):
"""return the index array for lagged values
Warning: if k is larger then the number of observations for an
individual, then no values for that individual are returned.
TODO: for the unbalanced case, I should get the same truncation for
the array with lag=0. From the return of lag_idx we wouldn't know
which individual is missing.
TODO: do I want the full equivalent of lagmat in tsa?
maxlag or lag or lags.
not tested yet
"""
lag_idx = np.asarray(self.groupidx)[:, 1] - lag # asarray or already?
mask_ok = (lag <= lag_idx)
# still an observation that belongs to the same individual
return lag_idx[mask_ok]
def _is_hierarchical(x):
"""
Checks if the first item of an array-like object is also array-like
If so, we have a MultiIndex and returns True. Else returns False.
"""
item = x[0]
# is there a better way to do this?
if isinstance(item, (list, tuple, np.ndarray, pd.Series, pd.DataFrame)):
return True
else:
return False
def _make_hierarchical_index(index, names):
return MultiIndex.from_tuples(*[index], names=names)
def _make_generic_names(index):
n_names = len(index.names)
pad = str(len(str(n_names))) # number of digits
return [("group{0:0"+pad+"}").format(i) for i in range(n_names)]
class Grouping(object):
def __init__(self, index, names=None):
"""
index : index-like
Can be pandas MultiIndex or Index or array-like. If array-like
and is a MultipleIndex (more than one grouping variable),
groups are expected to be in each row. E.g., [('red', 1),
('red', 2), ('green', 1), ('green', 2)]
names : list or str, optional
The names to use for the groups. Should be a str if only
one grouping variable is used.
Notes
-----
If index is already a pandas Index then there is no copy.
"""
if isinstance(index, (Index, MultiIndex)):
if names is not None:
if hasattr(index, 'set_names'): # newer pandas
index.set_names(names, inplace=True)
else:
index.names = names
self.index = index
else: # array-like
if _is_hierarchical(index):
self.index = _make_hierarchical_index(index, names)
else:
self.index = Index(index, name=names)
if names is None:
names = _make_generic_names(self.index)
if hasattr(self.index, 'set_names'):
self.index.set_names(names, inplace=True)
else:
self.index.names = names
self.nobs = len(self.index)
self.nlevels = len(self.index.names)
self.slices = None
@property
def index_shape(self):
if hasattr(self.index, 'levshape'):
return self.index.levshape
else:
return self.index.shape
@property
def levels(self):
if hasattr(self.index, 'levels'):
return self.index.levels
else:
return pd.Categorical(self.index).levels
@property
def labels(self):
# this was index_int, but that's not a very good name...
if hasattr(self.index, 'labels'):
return self.index.labels
else: # pandas version issue here
# Compat code for the labels -> codes change in pandas 0.15
# FIXME: use .codes directly when we don't want to support
# pandas < 0.15
tmp = pd.Categorical(self.index)
try:
labl = tmp.codes
except AttributeError:
                labl = tmp.labels  # Old pandas
return labl[None]
@property
def group_names(self):
return self.index.names
def reindex(self, index=None, names=None):
"""
Resets the index in-place.
"""
# NOTE: this isn't of much use if the rest of the data doesn't change
# This needs to reset cache
if names is None:
names = self.group_names
self = Grouping(index, names)
def get_slices(self, level=0):
"""
Sets the slices attribute to be a list of indices of the sorted
groups for the first index level. I.e., self.slices[0] is the
index where each observation is in the first (sorted) group.
"""
# TODO: refactor this
groups = self.index.get_level_values(level).unique()
groups = np.array(groups)
groups.sort()
if isinstance(self.index, MultiIndex):
self.slices = [self.index.get_loc_level(x, level=level)[0]
for x in groups]
else:
self.slices = [self.index.get_loc(x) for x in groups]
def count_categories(self, level=0):
"""
Sets the attribute counts to equal the bincount of the (integer-valued)
labels.
"""
# TODO: refactor this not to set an attribute. Why would we do this?
self.counts = np.bincount(self.labels[level])
def check_index(self, is_sorted=True, unique=True, index=None):
"""Sanity checks"""
        if index is None:
index = self.index
if is_sorted:
test = pd.DataFrame(lrange(len(index)), index=index)
test_sorted = test.sort()
if not test.index.equals(test_sorted.index):
                raise Exception('Data is not sorted')
if unique:
if len(index) != len(index.unique()):
raise Exception('Duplicate index entries')
def sort(self, data, index=None):
"""Applies a (potentially hierarchical) sort operation on a numpy array
or pandas series/dataframe based on the grouping index or a
user-supplied index. Returns an object of the same type as the
original data as well as the matching (sorted) Pandas index.
"""
if index is None:
index = self.index
if data_util._is_using_ndarray_type(data, None):
if data.ndim == 1:
out = pd.Series(data, index=index, copy=True)
out = out.sort_index()
else:
out = pd.DataFrame(data, index=index)
out = out.sort_index(inplace=False) # copies
return np.array(out), out.index
elif data_util._is_using_pandas(data, None):
out = data
out = out.reindex(index) # copies?
out = out.sort_index()
return out, out.index
else:
msg = 'data must be a Numpy array or a Pandas Series/DataFrame'
raise ValueError(msg)
def transform_dataframe(self, dataframe, function, level=0, **kwargs):
"""Apply function to each column, by group
Assumes that the dataframe already has a proper index"""
if dataframe.shape[0] != self.nobs:
raise Exception('dataframe does not have the same shape as index')
out = dataframe.groupby(level=level).apply(function, **kwargs)
if 1 in out.shape:
return np.ravel(out)
else:
return np.array(out)
def transform_array(self, array, function, level=0, **kwargs):
"""Apply function to each column, by group
"""
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
dataframe = pd.DataFrame(array, index=self.index)
return self.transform_dataframe(dataframe, function, level=level,
**kwargs)
def transform_slices(self, array, function, level=0, **kwargs):
"""Apply function to each group. Similar to transform_array but does
not coerce array to a DataFrame and back and only works on a 1D or 2D
numpy array. function is called function(group, group_idx, **kwargs).
"""
array = np.asarray(array)
if array.shape[0] != self.nobs:
raise Exception('array does not have the same shape as index')
# always reset because level is given. need to refactor this.
self.get_slices(level=level)
processed = []
for s in self.slices:
if array.ndim == 2:
subset = array[s, :]
elif array.ndim == 1:
subset = array[s]
processed.append(function(subset, s, **kwargs))
processed = np.array(processed)
return processed.reshape(-1, processed.shape[-1])
# TODO: this isn't general needs to be a PanelGrouping object
def dummies_time(self):
self.dummy_sparse(level=1)
return self._dummies
def dummies_groups(self, level=0):
self.dummy_sparse(level=level)
return self._dummies
def dummy_sparse(self, level=0):
"""create a sparse indicator from a group array with integer labels
Parameters
----------
groups: ndarray, int, 1d (nobs,) an array of group indicators for each
observation. Group levels are assumed to be defined as consecutive
integers, i.e. range(n_groups) where n_groups is the number of
group levels. A group level with no observations for it will still
produce a column of zeros.
Returns
-------
indi : ndarray, int8, 2d (nobs, n_groups)
an indicator array with one row per observation, that has 1 in the
column of the group level for that observation
Examples
--------
>>> g = np.array([0, 0, 2, 1, 1, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi
<7x3 sparse matrix of type '<type 'numpy.int8'>'
with 7 stored elements in Compressed Sparse Row format>
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
current behavior with missing groups
>>> g = np.array([0, 0, 2, 0, 2, 0])
>>> indi = dummy_sparse(g)
>>> indi.todense()
matrix([[1, 0, 0],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0],
[0, 0, 1],
[1, 0, 0]], dtype=int8)
"""
from scipy import sparse
groups = self.labels[level]
indptr = np.arange(len(groups)+1)
data = np.ones(len(groups), dtype=np.int8)
self._dummies = sparse.csr_matrix((data, groups, indptr))
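# Hedged usage sketch (not part of the original module): a minimal example of the
# Grouping helper above, using a plain list of labels as the index. The labels and
# the data array are made up for illustration.
def _grouping_example():
    grp = Grouping(['a', 'a', 'b', 'b', 'b'], names='letter')
    x = np.arange(10.).reshape(5, 2)
    # column sums within each group, returned as an ndarray (2 groups x 2 columns)
    return grp.transform_array(x, lambda df: df.sum(), level=0)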
if __name__ == '__main__':
# ---------- examples combine_indices
from numpy.testing import assert_equal
np.random.seed(985367)
groups = np.random.randint(0, 2, size=(10, 2))
uv, ux, u, label = combine_indices(groups, return_labels=True)
uv, ux, u, label = combine_indices(groups, prefix='g1,g2=', sep=',',
return_labels=True)
group0 = np.array(['sector0', 'sector1'])[groups[:, 0]]
group1 = np.array(['region0', 'region1'])[groups[:, 1]]
uv, ux, u, label = combine_indices((group0, group1),
prefix='sector,region=',
sep=',',
return_labels=True)
uv, ux, u, label = combine_indices((group0, group1), prefix='', sep='.',
return_labels=True)
group_joint = np.array(label)[uv]
group_joint_expected = np.array(['sector1.region0', 'sector0.region1',
'sector0.region0', 'sector0.region1',
'sector1.region1', 'sector0.region0',
'sector1.region0', 'sector1.region0',
'sector0.region1', 'sector0.region0'],
dtype='|S15')
assert_equal(group_joint, group_joint_expected)
"""
>>> uv
array([2, 1, 0, 0, 1, 0, 2, 0, 1, 0])
>>> label
['sector0.region0', 'sector1.region0', 'sector1.region1']
>>> np.array(label)[uv]
array(['sector1.region1', 'sector1.region0', 'sector0.region0',
'sector0.region0', 'sector1.region0', 'sector0.region0',
'sector1.region1', 'sector0.region0', 'sector1.region0',
'sector0.region0'],
dtype='|S15')
>>> np.column_stack((group0, group1))
array([['sector1', 'region1'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0'],
['sector1', 'region1'],
['sector0', 'region0'],
['sector1', 'region0'],
['sector0', 'region0']],
dtype='|S7')
"""
# ------------- examples sparse_dummies
from scipy import sparse
g = np.array([0, 0, 1, 2, 1, 1, 2, 0])
u = lrange(3)
indptr = np.arange(len(g)+1)
data = np.ones(len(g), dtype=np.int8)
a = sparse.csr_matrix((data, g, indptr))
print(a.todense())
print(np.all(a.todense() == (g[:, None] == np.arange(3)).astype(int)))
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
print('group means')
print(x.T * a)
print(np.dot(x.T, g[:, None] == np.arange(3)))
print(np.array([np.bincount(g, weights=x[:, col]) for col in range(3)]))
for cat in u:
print(x[g == cat].sum(0))
for cat in u:
x[g == cat].sum(0)
cc = sparse.csr_matrix([[0, 1, 0, 1, 0, 0, 0, 0, 0],
[1, 0, 1, 0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 0, 1, 0, 0],
[0, 1, 0, 1, 0, 1, 0, 1, 0],
[0, 0, 1, 0, 1, 0, 0, 0, 1],
[0, 0, 0, 1, 0, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 0, 0, 1, 0, 1, 0]])
# ------------- groupsums
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2), g,
use_bincount=False).T)
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 0], g))
print(group_sums(np.arange(len(g)*3*2).reshape(len(g), 3, 2)[:, :, 1], g))
# ------------- examples class
x = np.arange(len(g)*3).reshape(len(g), 3, order='F')
mygroup = Group(g)
print(mygroup.group_int)
print(mygroup.group_sums(x))
print(mygroup.labels())
| {
"content_hash": "42c9bb9c5a8dd9c82ac451b554e4a58a",
"timestamp": "",
"source": "github",
"line_count": 657,
"max_line_length": 79,
"avg_line_length": 34.207001522070016,
"alnum_prop": 0.551570703924535,
"repo_name": "ChadFulton/statsmodels",
"id": "110e9dc461b81e3657ca786684f8f45b02e9db4f",
"size": "22498",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "statsmodels/tools/grouputils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AGS Script",
"bytes": "457842"
},
{
"name": "Assembly",
"bytes": "10035"
},
{
"name": "Batchfile",
"bytes": "3469"
},
{
"name": "C",
"bytes": "381"
},
{
"name": "HTML",
"bytes": "148470"
},
{
"name": "MATLAB",
"bytes": "2609"
},
{
"name": "Python",
"bytes": "11749760"
},
{
"name": "R",
"bytes": "90986"
},
{
"name": "Rebol",
"bytes": "123"
},
{
"name": "Shell",
"bytes": "8181"
},
{
"name": "Smarty",
"bytes": "1014"
},
{
"name": "Stata",
"bytes": "65045"
}
],
"symlink_target": ""
} |
import os
from setuptools import setup
if __name__ == "__main__":
import scatterbrane
setup(
name="scatterbrane",
version = "0.1.0",
packages = ["scatterbrane"],
author = "Katherine Rosenfeld",
author_email = "krosenf@gmail.com",
description = ("A python module to simulate the effect of anisotropic scattering "
"on astrophysical images."),
license = "MIT",
keywords = "scattering astronomy EHT"
)
| {
"content_hash": "f01feb972a8f841c518e29b4cd10d557",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 90,
"avg_line_length": 31,
"alnum_prop": 0.5826612903225806,
"repo_name": "krosenfeld/scatterbrane",
"id": "5e04c050afda6cdd6dc71b90308bde788d22d9f7",
"size": "496",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "40405"
}
],
"symlink_target": ""
} |
"""Weapon.py: Contains all weapon classes. Each class controls behavior for a specific weapon."""
__author__ = "Scott Munro"
__copyright__ = "Copyright 2015"
import bullet
import game_constants
class Weapon(object):
def fire(self):
raise Exception("Must implement 'Fire' method")
def reload(self):
raise Exception("Must implement 'reload' method")
class Pistol(Weapon):
def __init__(self):
self.ammo_left = game_constants.PISTOL_CLIP_CAPACITY # full clip
self.key = game_constants.PISTOL_KEY
def fire(self, x, y, rotation):
if self.ammo_left > 0:
self.ammo_left -= 1
return bullet.PistolBullet(x, y, rotation)
return None
def reload(self):
        self.ammo_left = game_constants.PISTOL_CLIP_CAPACITY
# class Shotgun( Weapon ):
# class Crossbow( Weapon ):
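# Hedged usage sketch (not part of the original module): empties a pistol clip and
# reloads it. It assumes the bullet and game_constants modules imported above are
# available, since Pistol.fire() constructs a PistolBullet.
def _pistol_demo():
    pistol = Pistol()
    shots_fired = 0
    while True:
        shot = pistol.fire(x=0, y=0, rotation=0)
        if shot is None:  # clip is empty, fire() returns None
            pistol.reload()
            break
        shots_fired += 1
    return shots_fired  # equals the clip capacity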
| {
"content_hash": "4280428926f71a7029326e5cb0f893a2",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 97,
"avg_line_length": 23.82857142857143,
"alnum_prop": 0.6294964028776978,
"repo_name": "scottnm/itchnscratch",
"id": "98344d083cea59d5e7a1792ce45e2ec6f9d89e77",
"size": "857",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Weapon.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "21774"
},
{
"name": "Shell",
"bytes": "39"
}
],
"symlink_target": ""
} |
"""
Support for Vanderbilt (formerly Siemens) SPC alarm systems.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/spc/
"""
import logging
import asyncio
import json
from urllib.parse import urljoin
import aiohttp
import async_timeout
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers import discovery
from homeassistant.const import (
STATE_UNKNOWN, STATE_ON, STATE_OFF, STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME, STATE_ALARM_DISARMED, STATE_ALARM_TRIGGERED,
STATE_UNAVAILABLE)
DOMAIN = 'spc'
REQUIREMENTS = ['websockets==3.2']
_LOGGER = logging.getLogger(__name__)
ATTR_DISCOVER_DEVICES = 'devices'
ATTR_DISCOVER_AREAS = 'areas'
CONF_WS_URL = 'ws_url'
CONF_API_URL = 'api_url'
DATA_REGISTRY = 'spc_registry'
DATA_API = 'spc_api'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_WS_URL): cv.string,
vol.Required(CONF_API_URL): cv.string
}),
}, extra=vol.ALLOW_EXTRA)
@asyncio.coroutine
def async_setup(hass, config):
"""Setup the SPC platform."""
hass.data[DATA_REGISTRY] = SpcRegistry()
api = SpcWebGateway(hass,
config[DOMAIN].get(CONF_API_URL),
config[DOMAIN].get(CONF_WS_URL))
hass.data[DATA_API] = api
# add sensor devices for each zone (typically motion/fire/door sensors)
zones = yield from api.get_zones()
if zones:
hass.async_add_job(discovery.async_load_platform(
hass, 'binary_sensor', DOMAIN,
{ATTR_DISCOVER_DEVICES: zones}, config))
# create a separate alarm panel for each area
areas = yield from api.get_areas()
if areas:
hass.async_add_job(discovery.async_load_platform(
hass, 'alarm_control_panel', DOMAIN,
{ATTR_DISCOVER_AREAS: areas}, config))
# start listening for incoming events over websocket
api.start_listener(_async_process_message, hass.data[DATA_REGISTRY])
return True
@asyncio.coroutine
def _async_process_message(sia_message, spc_registry):
spc_id = sia_message['sia_address']
sia_code = sia_message['sia_code']
# BA - Burglary Alarm
# CG - Close Area
# NL - Perimeter Armed
# OG - Open Area
# ZO - Zone Open
# ZC - Zone Close
# ZX - Zone Short
# ZD - Zone Disconnected
extra = {}
if sia_code in ('BA', 'CG', 'NL', 'OG'):
# change in area status, notify alarm panel device
device = spc_registry.get_alarm_device(spc_id)
data = sia_message['description'].split('¦')
if len(data) == 3:
extra['changed_by'] = data[1]
else:
# change in zone status, notify sensor device
device = spc_registry.get_sensor_device(spc_id)
sia_code_to_state_map = {'BA': STATE_ALARM_TRIGGERED,
'CG': STATE_ALARM_ARMED_AWAY,
'NL': STATE_ALARM_ARMED_HOME,
'OG': STATE_ALARM_DISARMED,
'ZO': STATE_ON,
'ZC': STATE_OFF,
'ZX': STATE_UNKNOWN,
'ZD': STATE_UNAVAILABLE}
new_state = sia_code_to_state_map.get(sia_code, None)
if new_state and not device:
_LOGGER.warning("No device mapping found for SPC area/zone id %s.",
spc_id)
elif new_state:
yield from device.async_update_from_spc(new_state, extra)
class SpcRegistry:
"""Maintains mappings between SPC zones/areas and HA entities."""
def __init__(self):
"""Initialize the registry."""
self._zone_id_to_sensor_map = {}
self._area_id_to_alarm_map = {}
def register_sensor_device(self, zone_id, device):
"""Add a sensor device to the registry."""
self._zone_id_to_sensor_map[zone_id] = device
def get_sensor_device(self, zone_id):
"""Retrieve a sensor device for a specific zone."""
return self._zone_id_to_sensor_map.get(zone_id, None)
def register_alarm_device(self, area_id, device):
"""Add an alarm device to the registry."""
self._area_id_to_alarm_map[area_id] = device
def get_alarm_device(self, area_id):
"""Retrieve an alarm device for a specific area."""
return self._area_id_to_alarm_map.get(area_id, None)
@asyncio.coroutine
def _ws_process_message(message, async_callback, *args):
if message.get('status', '') != 'success':
_LOGGER.warning("Unsuccessful websocket message "
"delivered, ignoring: %s", message)
try:
yield from async_callback(message['data']['sia'], *args)
except: # pylint: disable=bare-except
_LOGGER.exception("Exception in callback, ignoring.")
class SpcWebGateway:
"""Simple binding for the Lundix SPC Web Gateway REST API."""
AREA_COMMAND_SET = 'set'
AREA_COMMAND_PART_SET = 'set_a'
AREA_COMMAND_UNSET = 'unset'
def __init__(self, hass, api_url, ws_url):
"""Initialize the web gateway client."""
self._hass = hass
self._api_url = api_url
self._ws_url = ws_url
self._ws = None
@asyncio.coroutine
def get_zones(self):
"""Retrieve all available zones."""
return (yield from self._get_data('zone'))
@asyncio.coroutine
def get_areas(self):
"""Retrieve all available areas."""
return (yield from self._get_data('area'))
@asyncio.coroutine
def send_area_command(self, area_id, command):
"""Send an area command."""
_LOGGER.debug("Sending SPC area command '%s' to area %s.",
command, area_id)
resource = "area/{}/{}".format(area_id, command)
return (yield from self._call_web_gateway(resource, use_get=False))
def start_listener(self, async_callback, *args):
"""Start the websocket listener."""
try:
from asyncio import ensure_future
except ImportError:
from asyncio import async as ensure_future
ensure_future(self._ws_listen(async_callback, *args))
def _build_url(self, resource):
return urljoin(self._api_url, "spc/{}".format(resource))
@asyncio.coroutine
def _get_data(self, resource):
data = yield from self._call_web_gateway(resource)
if not data:
return False
if data['status'] != 'success':
_LOGGER.error("SPC Web Gateway call unsuccessful "
"for resource '%s'.", resource)
return False
return [item for item in data['data'][resource]]
@asyncio.coroutine
def _call_web_gateway(self, resource, use_get=True):
response = None
session = None
url = self._build_url(resource)
try:
_LOGGER.debug("Attempting to retrieve SPC data from %s.", url)
session = aiohttp.ClientSession()
with async_timeout.timeout(10, loop=self._hass.loop):
action = session.get if use_get else session.put
response = yield from action(url)
if response.status != 200:
_LOGGER.error("SPC Web Gateway returned http "
"status %d, response %s.",
response.status, (yield from response.text()))
return False
result = yield from response.json()
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting SPC data from %s.", url)
return False
except aiohttp.ClientError:
_LOGGER.exception("Error getting SPC data from %s.", url)
return False
finally:
if session:
yield from session.close()
if response:
yield from response.release()
_LOGGER.debug("Data from SPC: %s", result)
return result
@asyncio.coroutine
def _ws_read(self):
import websockets as wslib
try:
if not self._ws:
self._ws = yield from wslib.connect(self._ws_url)
_LOGGER.info("Connected to websocket at %s.", self._ws_url)
except Exception as ws_exc: # pylint: disable=broad-except
_LOGGER.error("Failed to connect to websocket: %s", ws_exc)
return
result = None
try:
result = yield from self._ws.recv()
_LOGGER.debug("Data from websocket: %s", result)
except Exception as ws_exc: # pylint: disable=broad-except
_LOGGER.error("Failed to read from websocket: %s", ws_exc)
try:
yield from self._ws.close()
finally:
self._ws = None
return result
@asyncio.coroutine
def _ws_listen(self, async_callback, *args):
try:
while True:
result = yield from self._ws_read()
if result:
yield from _ws_process_message(json.loads(result),
async_callback, *args)
else:
_LOGGER.info("Trying again in 30 seconds.")
yield from asyncio.sleep(30)
finally:
if self._ws:
yield from self._ws.close()
| {
"content_hash": "6140c4432d44765161307434bf8107d9",
"timestamp": "",
"source": "github",
"line_count": 283,
"max_line_length": 76,
"avg_line_length": 33.169611307420496,
"alnum_prop": 0.5811228294449771,
"repo_name": "ewandor/home-assistant",
"id": "c186559c91a4c4758b52f0ecac6ff6a54534de7a",
"size": "9388",
"binary": false,
"copies": "3",
"ref": "refs/heads/dev",
"path": "homeassistant/components/spc.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "8860790"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "12639"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals, division
from datetime import date
import uuid
from django.db import models
from django.core.exceptions import ValidationError
from django.dispatch import receiver
from django.db.models.signals import pre_save
from ..users.models import User
from ..reviews.models import Wine, Review
#### Helper Functions ###
def validate_image_size(image):
"""Validator: Ensures that image uploads are no greater than 2MB"""
file_size = image.file.size
file_size_in_MB = file_size/(1024 * 1024)
megabyte_upload_limit = 2.0
if file_size > megabyte_upload_limit * 1024 * 1024:
raise ValidationError(
"Max file size is %sMB, this is %.2fMB" % (megabyte_upload_limit, file_size_in_MB))
def image_upload_path(instance, filename):
"""Gives unique name to the uploaded image and a path based on date."""
file_extension = filename.split('.')[-1]
new_file_name = "%s.%s" % (uuid.uuid4(), file_extension)
year_month_path = date.today().strftime('%Y/%m/')
shot_path = year_month_path + new_file_name
full_path = instance.get_upload_path(shot_path)
return full_path
### Abstract Image Model ###
class BasicImage(models.Model):
"""Defines a basic image. Must have a user, and an Image.
Since validation is not enforced at the db level by django, and I want it to be,
we will need to override the save method to enforce image size.
"""
user = models.ForeignKey(User)
shot = models.ImageField(upload_to=image_upload_path, validators=[validate_image_size])
def save(self, *args, **kwargs):
file_size = float(self.shot.size)/float(1024*1024)
if file_size > 2.0:
raise ValidationError(
{'shot': "Max file size is 2.0MB, this is %.2fMB" % file_size,}
)
else:
super(BasicImage, self).save(*args, **kwargs)
class Meta:
abstract=True
### Concrete Image Models ###
class WineImage(BasicImage):
"""Defines a wine bottle image.
It is just like a basic image, but prepends a path that directs it to the proper folder.
Also: It is associated with a wine, and must be associated with a review.
"""
wine = models.ForeignKey(Wine, null=True, blank=True, on_delete=models.CASCADE,
related_name='wineimages')
review = models.ForeignKey(Review, models.SET_NULL, null=True,
related_name='wineimages')
def get_upload_path(self, filename):
return "bottle_shots/%s" % filename
class ArticleImage(BasicImage):
"""Defines an Article image.
It is just like a basic image, but prepends a path that directs it to the proper folder.
"""
def get_upload_path(self, filename):
return "article_images/%s" % filename
| {
"content_hash": "f856b19e3db4d706e501674acb7eaa6f",
"timestamp": "",
"source": "github",
"line_count": 79,
"max_line_length": 95,
"avg_line_length": 35.64556962025316,
"alnum_prop": 0.6598011363636364,
"repo_name": "REBradley/WineArb",
"id": "875cbd75f847beab942755c52fffddbbff40047a",
"size": "2816",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "winearb/upload_handling/models.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "3402"
},
{
"name": "HTML",
"bytes": "46800"
},
{
"name": "JavaScript",
"bytes": "2423482"
},
{
"name": "Python",
"bytes": "94891"
},
{
"name": "Shell",
"bytes": "4200"
}
],
"symlink_target": ""
} |
"""
The following code is used to convert bytes to be human readable.
It was found on the Internet...
"""
import math
import string
import sys
import inspect
import zipfile
import tarfile
import logging
if sys.version_info >= (3, 0):
long = int
class human_readable(long):
"""
define a human_readable class to allow custom formatting
format specifiers supported :
em : formats the size as bits in IEC format i.e. 1024 bits (128 bytes) = 1Kib
eM : formats the size as Bytes in IEC format i.e. 1024 bytes = 1KiB
sm : formats the size as bits in SI format i.e. 1000 bits = 1kb
sM : formats the size as bytes in SI format i.e. 1000 bytes = 1KB
cm : format the size as bit in the common format i.e. 1024 bits (128 bytes) = 1Kb
cM : format the size as bytes in the common format i.e. 1024 bytes = 1KB
code from: http://code.activestate.com/recipes/578323-human-readable-filememory-sizes-v2/
"""
def __format__(self, fmt):
# is it an empty format or not a special format for the size class
if fmt == "" or fmt[-2:].lower() not in ["em", "sm", "cm"]:
            if fmt != "" and fmt[-1].lower() in ['b', 'c', 'd', 'o', 'x', 'n', 'e', 'f', 'g', '%']:
# Numeric format.
return long(self).__format__(fmt)
else:
return str(self).__format__(fmt)
# work out the scale, suffix and base
factor, suffix = (8, "b") if fmt[-1] in string.lowercase else (1, "B")
base = 1024 if fmt[-2] in ["e", "c"] else 1000
# Add the i for the IEC format
suffix = "i" + suffix if fmt[-2] == "e" else suffix
mult = ["", "K", "M", "G", "T", "P"]
val = float(self) * factor
i = 0 if val < 1 else int(math.log(val, base)) + 1
v = val / math.pow(base, i)
v, i = (v, i) if v > 0.5 else (v * base, i - 1)
# Identify if there is a width and extract it
width = "" if fmt.find(".") == -1 else fmt[:fmt.index(".")]
precis = fmt[:-2] if width == "" else fmt[fmt.index("."):-2]
# do the precision bit first, so width/alignment works with the suffix
if float(self) == 0:
return "{0:{1}f}".format(v, precis)
t = ("{0:{1}f}" + mult[i] + suffix).format(v, precis)
return "{0:{1}}".format(t, width) if width != "" else t
def currentframe():
"""Return the frame object for the caller's stack frame."""
try:
raise Exception
except:
return sys.exc_info()[2].tb_frame.f_back
class BUIlogging(object):
def _logger(self, level, *args):
if self.logger:
"""
            Try to guess where the function was called from
"""
cf = currentframe()
(frame, filename, line_number, function_name, lines, index) = inspect.getouterframes(cf)[1]
if cf is not None:
cf = cf.f_back
"""
Ugly hack to reformat the message
"""
ar = list(args)
if isinstance(ar[0], str):
ar[0] = filename + ':' + str(cf.f_lineno) + ' => ' + ar[0]
else:
ar = [filename + ':' + str(cf.f_lineno) + ' => {0}'.format(ar)]
args = tuple(ar)
self.logger.log(logging.getLevelName(level.upper()), *args)
class BUIcompress():
def __init__(self, name, archive):
self.name = name
self.archive = archive
def __enter__(self):
self.arch = None
if self.archive == 'zip':
self.arch = zipfile.ZipFile(self.name, mode='w', compression=zipfile.ZIP_DEFLATED)
elif self.archive == 'tar.gz':
self.arch = tarfile.open(self.name, 'w:gz')
elif self.archive == 'tar.bz2':
self.arch = tarfile.open(self.name, 'w:bz2')
return self
def __exit__(self, type, value, traceback):
self.arch.close()
def append(self, path, arcname):
if self.archive == 'zip':
self.arch.write(path, arcname)
elif self.archive in ['tar.gz', 'tar.bz2']:
self.arch.add(path, arcname=arcname, recursive=False)
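# Hedged usage sketch (not part of the original module): archives this very file with
# the BUIcompress context manager above. The output path is an arbitrary example.
def _compress_example(name='/tmp/burpui-example.tar.gz'):
    with BUIcompress(name, 'tar.gz') as arch:
        arch.append(__file__, 'utils.py')
    return name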
| {
"content_hash": "d3fdbf0fb4c4fb8c6161023d1a20ebc4",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 103,
"avg_line_length": 35.52542372881356,
"alnum_prop": 0.5403148854961832,
"repo_name": "bedaes/burp-ui",
"id": "eadd38956e9d45e3220888abc2fb00292acb6310",
"size": "4216",
"binary": false,
"copies": "1",
"ref": "refs/heads/reverse-proxied",
"path": "burpui/misc/utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "28109"
},
{
"name": "HTML",
"bytes": "55400"
},
{
"name": "JavaScript",
"bytes": "64689"
},
{
"name": "Python",
"bytes": "239319"
},
{
"name": "Shell",
"bytes": "15049"
}
],
"symlink_target": ""
} |
from sqlalchemy.schema import Column
from sqlalchemy.schema import MetaData
from sqlalchemy.schema import Table
from melange.db.sqlalchemy.migrate_repo.schema import String
def upgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
ip_blocks = Table('ip_blocks', meta, autoload=True)
network_name = Column('network_name', String(255))
ip_blocks.create_column(network_name)
def downgrade(migrate_engine):
meta = MetaData()
meta.bind = migrate_engine
ip_blocks = Table('ip_blocks', meta, autoload=True)
ip_blocks.drop_column('network_name')
| {
"content_hash": "115cf3d028b71f1873af2f289c3e429f",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 60,
"avg_line_length": 27.136363636363637,
"alnum_prop": 0.7269681742043551,
"repo_name": "rackerlabs/melange",
"id": "bb3d3ca42da39eee2c396a2cebee7c93b0cdac1b",
"size": "1250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "melange/db/sqlalchemy/migrate_repo/versions/003_add_network_label_to_ip_blocks.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "11031"
},
{
"name": "JavaScript",
"bytes": "7403"
},
{
"name": "Python",
"bytes": "630677"
},
{
"name": "Shell",
"bytes": "5182"
}
],
"symlink_target": ""
} |
from Player import Player
class Game:
def __init__(self):
self.timebank = 0
self.timePerMove = 0
self.enemy = Player()
self.me = Player()
self.piece = None
self.piecePosition = None
self.nextPiece = None
self.round = 0
| {
"content_hash": "91189fdc61291126b6f406721b3d2e02",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 33,
"avg_line_length": 19.25,
"alnum_prop": 0.5162337662337663,
"repo_name": "Dduvacher/TetrisBot",
"id": "43fad589fe6b9f19d0033442c6ad501d154c91f3",
"size": "308",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Bot/Game/Game.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "15535"
}
],
"symlink_target": ""
} |
from django.core.exceptions import ValidationError, ObjectDoesNotExist
from django.db import models
from django.contrib.auth.models import User
from django.utils.html import format_html
from django.conf import settings
from django.db.models.signals import post_save, post_delete, pre_delete
from django.dispatch import receiver
# from django.utils import timezone
from rest_framework.authtoken.models import Token
from timezone_field.fields import TimeZoneField
def validate_color(value):
try:
int(value, 16)
except ValueError:
raise ValidationError('{0} is not a hex color!'.format(value))
class Profile(models.Model):
user = models.OneToOneField(User, primary_key=True)
timezone = TimeZoneField(default=settings.TIME_ZONE)
class Tag(models.Model):
user = models.ForeignKey(User)
name = models.CharField(max_length=64, db_index=True)
color = models.CharField(max_length=6, validators=[validate_color])
def colored_name(self):
return format_html('<span style="color: #{};">{}</span>', self.color, self.name)
colored_name.admin_order_field = 'name'
def __str__(self):
return self.name
class Meta:
unique_together = ['user', 'name']
class Category(models.Model):
DEFAULT_NAME = '(default category)'
user = models.ForeignKey(User)
name = models.CharField(max_length=256, db_index=True)
def __str__(self):
return self.name
@classmethod
def get_or_create_default(cls, user):
return cls.objects.get_or_create(user=user, name=cls.DEFAULT_NAME)[0]
@classmethod
def get_default_category(cls, user):
try:
return cls.objects.get(user=user, name=cls.DEFAULT_NAME)
except ObjectDoesNotExist:
return None
@classmethod
def delete_default_if_empty(cls, user):
category = cls.get_default_category(user)
if category and not category.todo_set.exists():
category.delete()
class Meta:
unique_together = ['user', 'name']
verbose_name = 'Category'
verbose_name_plural = 'Categories'
class Todo(models.Model):
user = models.ForeignKey(User)
category = models.ForeignKey(Category, blank=True, on_delete=models.DO_NOTHING)
tags = models.ManyToManyField(Tag, blank=True)
text = models.CharField(max_length=256, db_index=True)
is_done = models.BooleanField(default=False, db_index=True)
deadline = models.DateTimeField(null=True, blank=True, db_index=True)
def mark_done(self, new_state):
self.is_done = new_state
self.save()
def reset_category(self):
self.category_id = None
self.save()
def save(self, *args, **kwargs):
if self.category_id is None:
self.category = Category.get_or_create_default(self.user)
elif self.category.user.pk != self.user.pk:
raise ValidationError({'category': 'You do not own that category!'})
super().save(*args, **kwargs)
class Meta:
ordering = ('deadline',)
@receiver(pre_delete, sender=Category)
def set_default_category_to_todo_set(sender, instance, **kwargs):
for todo in instance.todo_set.all():
todo.reset_category()
@receiver((post_delete, post_save), sender=Todo)
def delete_default_category_if_empty(sender, instance=None, **kwargs):
Category.delete_default_if_empty(instance.user)
@receiver(post_save, sender=settings.AUTH_USER_MODEL)
def create_auth_token(sender, instance=None, created=False, **kwargs):
if created:
Token.objects.create(user=instance)
| {
"content_hash": "02e32eff13f9168c69694f6e6fa595b3",
"timestamp": "",
"source": "github",
"line_count": 114,
"max_line_length": 88,
"avg_line_length": 31.45614035087719,
"alnum_prop": 0.6787506971556051,
"repo_name": "Azarn/mytodo",
"id": "abfefc1bbabf58eadbe79ca5bbbd7656eb39cb7f",
"size": "3586",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "todo/models.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "37251"
}
],
"symlink_target": ""
} |
__author__ = 'Matt Delaney'
import sys
import rospy
from std_msgs.msg import Int64, Int32
sys.path.append("/home/pi/ROSta-Bot/src/transporter/src")
from transport_drive_motor_API import *
from state_machine import *
from driving_interface.msg import position
# This is a simple 3-state machine. It moves the robot to a predetermined distance from the bin.
#
#
#
# -----------
# - sensor->
# - faces
# - forwards
# -----------
# (Forwards ->)
#
class SimpleStateMachine:
def __init__(self):
# Initialize the wooden-robot state machine.
self.woodenStateMachine = StateMachine()
# Initialize the transport drive motor command center (API).
self.robot = TransportDriveMotorAPI()
# The start state is the state where the robot does not move.
start = self.woodenStateMachine.addState("not moving", self.robot.do_not_move)
        rotating = self.woodenStateMachine.addState("rotating to face a specific direction", self.rotate_to_face)
# set the start state and current state to the new start state.
self.woodenStateMachine.startState = start
self.woodenStateMachine.currentState = start
# If the robot is "close enough" to the target angle, stay put.
start.addTransition("close enough: No movement necessary", self.should_robot_restart, start)
start.addTransition("Angle is bad: Rotating.", self.check_robot_angle, rotating)
rotating.addTransition("Angle is bad: Continue Rotating", self.check_robot_angle, rotating)
rotating.addTransition("Angle is close enough: Stop Rotating", self.should_robot_stop, start)
# All measurements are in Centimeters.
# At what angle to the target do we want to be? (degrees)
self.targetAngle = 0
# How far away from the target are we, currently? (degrees)
self.currentRobotOrientation = 0
# How far away from the target can we be and still be 'close enough'? (degrees)
self.STOP_AT = 10
self.RESTART_AT = 15
self.CLOCKWISE = -1
self.NOT_MOVING = 0
self.COUNTERCLOCKWISE = 3
self.currentCameraAngle = 0
self.currentPosition = position()
self.currentPosition.yPose = 180
# Current direction of rotation
self.current_rotation_direction = self.NOT_MOVING
# Subscribe to the "target robot angle" topic (note that this topic may not be active / may need to be
# manually set).
#rospy.init_node("simple_state_machine")
self.target_angle_subscriber = rospy.Subscriber("target_robot_angle", Int64, self.target_angle_changed)
# Subscribe to the current distance from the target. For now, that's an IR value.
self.current_angle_subscriber = rospy.Subscriber("range_data", position, self.current_position_reading_changed)
self.current_camera_angle_subscriber = rospy.Subscriber("current_camera_angle", Int32, self.current_camera_angle_changed )
#spin for all eternity. Note that, in Python, each ROS callback NEEDS TO TICK THE STATE MACHINE.
rospy.spin()
    # Is the robot within the restart threshold of the target angle (close enough to stay put)?
def should_robot_restart(self):
return abs(self.targetAngle - self.currentRobotOrientation) < self.RESTART_AT
# Should the robot stay put?
def should_robot_stop(self):
#TODO update this!
return abs(self.targetAngle - self.currentRobotOrientation) < self.STOP_AT
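    # check_robot_angle is referenced by the transitions registered in __init__ but is
    # not defined anywhere in this file; the sketch below is an assumed implementation:
    # the angle is considered "bad" (rotation needed) whenever the error is at or above
    # the restart threshold.
    def check_robot_angle(self):
        return abs(self.targetAngle - self.currentRobotOrientation) >= self.RESTART_AT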
def rotate_to_face(self):
self.currentRobotOrientation = 180 - self.currentPosition.yPose - self.currentCameraAngle
if (self.currentRobotOrientation - self.targetAngle) > 0:
# Rotate Clockwise
if self.current_rotation_direction != self.CLOCKWISE:
self.current_rotation_direction = self.CLOCKWISE
self.robot.simple_turn_direction(self.CLOCKWISE)
else:
#Rotate Counterclockwise
if self.current_rotation_direction != self.COUNTERCLOCKWISE:
self.current_rotation_direction = self.COUNTERCLOCKWISE
self.robot.simple_turn_direction(self.COUNTERCLOCKWISE)
    # Function callback for when the target robot angle (in degrees) is changed.
def target_angle_changed(self, new_target_angle):
self.targetAngle = new_target_angle.data
# Data has changed! Tick the state machine!
self.woodenStateMachine.tick()
def current_position_reading_changed(self, new_position):
self.currentPosition = new_position
# Data has changed! Tick the state machine!
self.woodenStateMachine.tick()
def current_camera_angle_changed(self, new_camera_angle):
self.currentCameraAngle = new_camera_angle.data
self.woodenStateMachine.tick()
ssm = SimpleStateMachine()
| {
"content_hash": "0f6c3dbd937e951ac4446fbf87b5b4ae",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 130,
"avg_line_length": 46.373831775700936,
"alnum_prop": 0.6823861346231358,
"repo_name": "MarsRobotics/ROSta-Bot",
"id": "b680ba9eec1947fc3fc6e3681452479b7db35f38",
"size": "4984",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/state_machine/src/rotation_state_machine.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Arduino",
"bytes": "62195"
},
{
"name": "C",
"bytes": "10422"
},
{
"name": "C++",
"bytes": "33941"
},
{
"name": "CMake",
"bytes": "26398"
},
{
"name": "Python",
"bytes": "39388"
},
{
"name": "Shell",
"bytes": "1026"
}
],
"symlink_target": ""
} |
import sys
from itertools import chain
from django import forms
from django.conf import settings
from django.db.models.query import QuerySet
from django.utils.encoding import force_text
from django.utils.html import conditional_escape, escape
from django.utils.safestring import mark_safe
try:
from django.forms.utils import flatatt
except ImportError:
from django.forms.util import flatatt
from django.utils.translation import ugettext_lazy as _
if sys.version_info[0] < 3:
iteritems = lambda d: iter(d.iteritems())
string_types = basestring,
str_ = unicode
else:
iteritems = lambda d: iter(d.items())
string_types = str,
str_ = str
STATIC_URL = getattr(settings, 'STATIC_URL', settings.MEDIA_URL)
class SortedMultipleChoiceField(forms.ModelMultipleChoiceField):
def __init__(self, *args, **kwargs):
if not kwargs.get('widget'):
kwargs['widget'] = SortedFilteredSelectMultiple(
is_stacked=kwargs.get('is_stacked', False)
)
super(SortedMultipleChoiceField, self).__init__(*args, **kwargs)
def clean(self, value):
queryset = super(SortedMultipleChoiceField, self).clean(value)
if value is None or not isinstance(queryset, QuerySet):
return queryset
object_list = dict((
(str_(key), value)
for key, value in iteritems(queryset.in_bulk(value))))
return [object_list[str_(pk)] for pk in value]
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_text(value) for value in self.prepare_value(initial)]
data_set = [force_text(value) for value in data]
return data_set != initial_set
class SortedFilteredSelectMultiple(forms.SelectMultiple):
"""
A SortableSelectMultiple with a JavaScript filter interface.
Requires jQuery to be loaded.
Note that the resulting JavaScript assumes that the jsi18n
catalog has been loaded in the page
"""
def __init__(self, is_stacked=False, attrs=None, choices=()):
self.is_stacked = is_stacked
super(SortedFilteredSelectMultiple, self).__init__(attrs, choices)
class Media:
css = {
'screen': (STATIC_URL + 'sortedm2m_filter_horizontal_widget/widget.css',)
}
js = (STATIC_URL + 'sortedm2m_filter_horizontal_widget/OrderedSelectBox.js',
STATIC_URL + 'sortedm2m_filter_horizontal_widget/OrderedSelectFilter.js',
STATIC_URL + 'sortedm2m_filter_horizontal_widget/jquery.min.js')
def build_attrs(self, attrs=None, extra_attrs=None, **kwargs):
attrs = dict(attrs, **kwargs)
if extra_attrs:
attrs.update(extra_attrs)
classes = attrs.setdefault('class', '').split()
classes.append('sortedm2m')
if self.is_stacked: classes.append('stacked')
attrs['class'] = u' '.join(classes)
return attrs
def render(self, name, value, attrs=None, choices=(), renderer=None):
if attrs is None: attrs = {}
if value is None: value = []
admin_media_prefix = getattr(settings, 'ADMIN_MEDIA_PREFIX', STATIC_URL + 'admin/')
final_attrs = self.build_attrs(self.attrs, attrs, name=name)
output = [u'<select multiple="multiple"%s>' % flatatt(final_attrs)]
options = self.render_options(choices, value)
if options:
output.append(options)
if 'verbose_name' in final_attrs.keys():
verbose_name = final_attrs['verbose_name']
else:
verbose_name = name.split('-')[-1]
output.append(u'</select>')
output.append(u'<script>window.addEventListener("load", function(e) {')
output.append(u'OrderedSelectFilter.init("id_%s", "%s", %s, "%s") });</script>\n' % \
(name, verbose_name, int(self.is_stacked), admin_media_prefix))
output.append(u"""
<script>
(function($) {
$(document).ready(function() {
var updateOrderedSelectFilter = function() {
// If any SelectFilter widgets are a part of the new form,
// instantiate a new SelectFilter instance for it.
if (typeof OrderedSelectFilter != "undefined"){
$(".sortedm2m").each(function(index, value){
var namearr = value.name.split('-');
OrderedSelectFilter.init(value.id, namearr[namearr.length-1], false, "%s");
});
$(".sortedm2mstacked").each(function(index, value){
var namearr = value.name.split('-');
OrderedSelectFilter.init(value.id, namearr[namearr.length-1], true, "%s");
});
}
}
$(document).on('formset:added', function(row, prefix) {
updateOrderedSelectFilter();
});
});
})(django.jQuery)
</script>""" % (admin_media_prefix, admin_media_prefix))
return mark_safe(u'\n'.join(output))
def render_option(self, selected_choices, option_value, option_label):
option_value = force_text(option_value)
selected_html = (option_value in selected_choices) and u' selected="selected"' or ''
try:
index = list(selected_choices).index(escape(option_value))
selected_html = u'%s %s' % (u' data-sort-value="%s"' % index, selected_html)
except ValueError:
pass
return u'<option value="%s"%s>%s</option>' % (
escape(option_value), selected_html,
conditional_escape(force_text(option_label)))
def render_options(self, choices, selected_choices):
# Normalize to strings.
selected_choices = list(force_text(v) for v in selected_choices)
output = []
for option_value, option_label in chain(self.choices, choices):
if isinstance(option_label, (list, tuple)):
output.append(u'<optgroup label="%s">' % escape(force_text(option_value)))
for option in option_label:
output.append(self.render_option(selected_choices, *option))
output.append(u'</optgroup>')
else:
output.append(self.render_option(selected_choices, option_value, option_label))
return u'\n'.join(output)
def _has_changed(self, initial, data):
if initial is None:
initial = []
if data is None:
data = []
if len(initial) != len(data):
return True
initial_set = [force_text(value) for value in initial]
data_set = [force_text(value) for value in data]
return data_set != initial_set
| {
"content_hash": "f46f48408d2c92d65db7ceccaf7253d1",
"timestamp": "",
"source": "github",
"line_count": 173,
"max_line_length": 103,
"avg_line_length": 40.26589595375722,
"alnum_prop": 0.5898650588573069,
"repo_name": "svleeuwen/sortedm2m-filter-horizontal-widget",
"id": "d722f5d8f30ef79a33471b11639a541b3c5f47a1",
"size": "6990",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "sortedm2m_filter_horizontal_widget/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "2375"
},
{
"name": "JavaScript",
"bytes": "17948"
},
{
"name": "Python",
"bytes": "17147"
}
],
"symlink_target": ""
} |
"""The implementation of `tf.data.Dataset.unique`."""
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.util import nest
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_experimental_dataset_ops
def _unique(self, name): # pylint: disable=unused-private-name
return _UniqueDataset(self, name)
class _UniqueDataset(dataset_ops.UnaryUnchangedStructureDataset):
"""A dataset containing the unique elements of an input dataset."""
def __init__(self, input_dataset, name=None):
"""See `tf.data.Dataset.unique` for details."""
self._input_dataset = input_dataset
for ty in nest.flatten(dataset_ops.get_legacy_output_types(input_dataset)):
if ty not in (dtypes.int32, dtypes.int64, dtypes.string):
raise TypeError(
f"`tf.data.Dataset.unique` does not support type {ty} -- only "
f"`tf.int32`, `tf.int64`, and `tf.string` are supported.")
self._name = name
variant_tensor = gen_experimental_dataset_ops.unique_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
**self._common_args)
super().__init__(input_dataset, variant_tensor)
| {
"content_hash": "ec7e0ec03d905e19bcae2bb44ef3b3af",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 80,
"avg_line_length": 43.142857142857146,
"alnum_prop": 0.7077814569536424,
"repo_name": "yongtang/tensorflow",
"id": "2184254139f711bc2a7d848508fcb295a07a9e2c",
"size": "1897",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tensorflow/python/data/ops/unique_op.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1368342"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "125162438"
},
{
"name": "CMake",
"bytes": "179878"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2118448"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792868"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11205807"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300198"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42642473"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621427"
},
{
"name": "Smarty",
"bytes": "89545"
},
{
"name": "SourcePawn",
"bytes": "14607"
},
{
"name": "Starlark",
"bytes": "7577804"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
} |
import tinysegmenter
from . import base
import testable
class Tzer(base.Tzer):
'''A wrapper for the TinySegmenter tokenizer for Japanese. e.g.:
>>> Tzer(1).tokenize(base.T_JP) == base.T_JP_TOKS
True'''
def __init__(self, ngram):
base.Tzer.__init__(self, ngram)
self.seg = tinysegmenter.TinySegmenter()
def tokenize_real(self, text):
return [i.lower() for i in self.seg.tokenize(text)]
testable.register('')
| {
"content_hash": "ac2dae8715a0db1fca8acb3e40f69069",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 21.61904761904762,
"alnum_prop": 0.6475770925110133,
"repo_name": "casmlab/quac",
"id": "87bd56fd0cde24c65455890f04d24124553ed07c",
"size": "518",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "lib/tok/tiny.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5004"
},
{
"name": "Gnuplot",
"bytes": "1396"
},
{
"name": "Makefile",
"bytes": "12373"
},
{
"name": "PLpgSQL",
"bytes": "2740"
},
{
"name": "Python",
"bytes": "570122"
},
{
"name": "Shell",
"bytes": "56557"
}
],
"symlink_target": ""
} |
from tornado.httpclient import AsyncHTTPClient
from tornado.concurrent import Future
from tornado import gen
from util import stop_loop, start_loop
@gen.coroutine
def fetch_coroutine(url):
http_client = AsyncHTTPClient()
response = yield http_client.fetch(url)
raise gen.Return(response)
def fetch_coroutine_callback(future):
result = future.result()
print('coroutine callback')
print(result.request.url, result.code, result.reason, result.request_time)
stop_loop(1)
if __name__ == '__main__':
"""
    Using gen.coroutine makes it easy for a function containing yield to return a Future.
"""
result_future = fetch_coroutine('https://baidu.com')
result_future.add_done_callback(fetch_coroutine_callback)
start_loop()
| {
"content_hash": "bfb6848e1c1643117ff058511aa40e3c",
"timestamp": "",
"source": "github",
"line_count": 28,
"max_line_length": 78,
"avg_line_length": 26,
"alnum_prop": 0.7156593406593407,
"repo_name": "zhyq0826/tornado-asyn",
"id": "9f83994d3afb32e4d2255607db76431a62447c1d",
"size": "780",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/main04.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "930"
},
{
"name": "Python",
"bytes": "17235"
}
],
"symlink_target": ""
} |
"""Python bindings for creating and serving the Reverb ReverbService.
See ./client.py and ./tf_client.py for details of how to interact with the
service.
"""
from __future__ import annotations
import abc
import collections
from typing import Optional, Sequence
from absl import logging
import portpicker
from reverb import client
from reverb import item_selectors
from reverb import pybind
from reverb import rate_limiters
from reverb import reverb_types
from reverb.platform.default import checkpointers
import termcolor
import tree
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import tensor_spec
from tensorflow.python.saved_model import nested_structure_coder
# pylint: enable=g-direct-tensorflow-import
class TableExtensionBase(metaclass=abc.ABCMeta):
"""Abstract base class for Table extensions."""
@abc.abstractmethod
def build_internal_extensions(
self,
table_name: str,
) -> Sequence[pybind.TableExtension]:
"""Constructs the c++ PriorityTableExtensions."""
class Table:
"""Item collection with configurable strategies for insertion and sampling.
A `Table` is the structure used to interact with the data stored on a server.
Each table can contain a limited number of "items" that can be retrieved
according to the strategy defined by the `sampler`. The size of a table, in
terms of number of items, is limited to `max_size`. When items are inserted
  into an already full table, the `remover` is used to decide which item should
  be removed.
  In addition to the selection strategies used to select items for retrieval and
  removal, the flow of data is controlled by a `RateLimiter`. A rate limiter
  controls high-level relations between inserts and samples by defining a
  target ratio between the two and what level of deviations from the target is
  acceptable. This is particularly useful when scaling up from single machine
use cases to distributed systems as the same "logical" throughput can be kept
constant even though the scale has changed by orders of magnitude.
It is important to note that "data elements" and "items" are related but
distinct types of entities.
Data element:
- The actual data written using `Writer.append`.
- Immutable once written.
- Is not stored in a `Table`.
- Can be referenced by items from one or more distinct `Table`.
- Cannot be retrieved in any other way than as a part of an item.
Item:
- The entity stored in a `Table`.
- Inserted using `Writer.create_item`.
- References one or more data elements, creating a "sequence".
The fact that data elements can be referenced by more than one item from one
  or multiple tables means that one has to be careful not to equate the size of
  a table (in terms of items) with the amount of data it references. The data
  will remain in memory on the server until the last item that references it is
  removed from its table. Removing an item from a table does therefore not
  necessarily result in any (significant) change in memory usage and one must be
  careful when selecting remover strategies for a multi-table server. Consider
  for example a server with two tables. One has a FIFO remover and the other
  LIFO remover. In this scenario, the two tables would not share any chunks and
  would eventually consume twice the amount of memory compared to a similar setup
where the two tables share the same type of removal strategy.
"""
def __init__(self,
name: str,
sampler: reverb_types.SelectorType,
remover: reverb_types.SelectorType,
max_size: int,
rate_limiter: rate_limiters.RateLimiter,
max_times_sampled: int = 0,
extensions: Sequence[TableExtensionBase] = (),
signature: Optional[reverb_types.SpecNest] = None):
"""Constructor of the Table.
Args:
name: Name of the priority table.
sampler: The strategy to use when selecting samples.
remover: The strategy to use when selecting which items to remove.
max_size: The maximum number of items which the replay is allowed to hold.
When an item is inserted into an already full priority table the
`remover` is used for selecting which item to remove before proceeding
with the new insert.
rate_limiter: Manages the data flow by limiting the sample and insert
calls.
max_times_sampled: Maximum number of times an item can be sampled before
it is deleted. Any value < 1 is ignored and means there is no limit.
extensions: Optional sequence of extensions used to add extra features to
the table.
signature: Optional nested structure containing `tf.TypeSpec` objects,
describing the schema of items in this table.
Raises:
ValueError: If name is empty.
ValueError: If max_size <= 0.
"""
if not name:
raise ValueError('name must be nonempty')
if max_size <= 0:
raise ValueError('max_size (%d) must be a positive integer' % max_size)
self._sampler = sampler
self._remover = remover
self._rate_limiter = rate_limiter
self._extensions = extensions
self._signature = signature
# Merge the c++ extensions into a single list.
internal_extensions = []
for extension in extensions:
internal_extensions += list(extension.build_internal_extensions(name))
if signature:
flat_signature = tree.flatten(signature)
for s in flat_signature:
if not isinstance(s, tensor_spec.TensorSpec):
raise ValueError(f'Unsupported signature spec: {s}')
signature_proto_str = (
nested_structure_coder.encode_structure(
signature).SerializeToString())
else:
signature_proto_str = None
self.internal_table = pybind.Table(
name=name,
sampler=sampler,
remover=remover,
max_size=max_size,
max_times_sampled=max_times_sampled,
rate_limiter=rate_limiter.internal_limiter,
extensions=internal_extensions,
signature=signature_proto_str)
@classmethod
def queue(cls,
name: str,
max_size: int,
extensions: Sequence[TableExtensionBase] = (),
signature: Optional[reverb_types.SpecNest] = None) -> Table:
"""Constructs a Table which acts like a queue.
Args:
name: Name of the priority table (aka queue).
max_size: Maximum number of items in the priority table (aka queue).
extensions: See documentation in the constructor.
signature: See documentation in the constructor.
Returns:
Table which behaves like a queue of size `max_size`.
"""
return cls(
name=name,
sampler=item_selectors.Fifo(),
remover=item_selectors.Fifo(),
max_size=max_size,
max_times_sampled=1,
rate_limiter=rate_limiters.Queue(max_size),
extensions=extensions,
signature=signature)
@classmethod
def stack(cls,
name: str,
max_size: int,
extensions: Sequence[TableExtensionBase] = (),
signature: Optional[reverb_types.SpecNest] = None) -> Table:
"""Constructs a Table which acts like a stack.
Args:
name: Name of the priority table (aka stack).
max_size: Maximum number of items in the priority table (aka stack).
extensions: See documentation in the constructor.
signature: See documentation in the constructor.
Returns:
Table which behaves like a stack of size `max_size`.
"""
return cls(
name=name,
sampler=item_selectors.Lifo(),
remover=item_selectors.Lifo(),
max_size=max_size,
max_times_sampled=1,
rate_limiter=rate_limiters.Stack(max_size),
extensions=extensions,
signature=signature)
@property
def name(self) -> str:
return self.internal_table.name()
@property
def info(self) -> reverb_types.TableInfo:
proto_string = self.internal_table.info()
return reverb_types.TableInfo.from_serialized_proto(proto_string)
def can_sample(self, num_samples: int) -> bool:
"""Returns True if a sample operation is permitted at the current state."""
return self.internal_table.can_sample(num_samples)
def can_insert(self, num_inserts: int) -> bool:
"""Returns True if an insert operation is permitted at the current state."""
return self.internal_table.can_insert(num_inserts)
def replace(self,
name: Optional[str] = None,
sampler: Optional[reverb_types.SelectorType] = None,
remover: Optional[reverb_types.SelectorType] = None,
max_size: Optional[int] = None,
rate_limiter: Optional[rate_limiters.RateLimiter] = None,
max_times_sampled: Optional[int] = None,
extensions: Optional[Sequence[TableExtensionBase]] = None,
signature: Optional[reverb_types.SpecNest] = None) -> Table:
"""Constructs a new, empty table from the definition of the current one.
All settings needed to construct the table that are not explicitly specified
are copied from the source table.
Args:
name: Name of the table to use, or None to re-use existing table's name.
sampler: The strategy to use when selecting samples, or None to re-use
existing table's sampler.
remover: The strategy to use when selecting which items to remove, or None
to re-use existing table's remover.
max_size: The maximum number of items which the replay is allowed to hold,
or None to re-use existing table's max_size.
rate_limiter: Manages the data flow by limiting the sample and insert
calls. Configuration of the original table is used when not specified.
max_times_sampled: Maximum number of times an item can be sampled before
it is deleted, or None to re-use existing table's max_size.
extensions: Optional sequence of extensions used to add extra features to
the table, or None to re-use existing table's max_size.
signature: Optional nested structure containing `tf.TypeSpec` objects,
describing the schema of items in this table, or None to re-use existing
table's max_size.
Returns:
Table with the same configuration as the original one (modulo overrides).
"""
info = self.info
if not sampler:
sampler = pybind.selector_from_proto(
info.sampler_options.SerializeToString())
if not remover:
remover = pybind.selector_from_proto(
info.remover_options.SerializeToString())
if not rate_limiter:
rate_limiter = rate_limiters.RateLimiter(
samples_per_insert=info.rate_limiter_info.samples_per_insert,
min_size_to_sample=info.rate_limiter_info.min_size_to_sample,
min_diff=info.rate_limiter_info.min_diff,
max_diff=info.rate_limiter_info.max_diff)
pick = lambda a, b: a if a is not None else b
return Table(
name=pick(name, self.name),
sampler=sampler,
remover=remover,
max_size=pick(max_size, info.max_size),
rate_limiter=rate_limiter,
max_times_sampled=pick(max_times_sampled, info.max_times_sampled),
extensions=pick(extensions, self._extensions),
signature=pick(signature, self._signature))
def __repr__(self) -> str:
return repr(self.internal_table)
class Server:
"""Reverb replay server.
The Server hosts the gRPC-service deepmind.reverb.ReverbService (see
reverb_service.proto). See ./client.py and ./tf_client for details of how to
interact with the service.
  A Server maintains inserted data and one or more PriorityTables. Multiple
  tables can be used to provide different views of the same underlying data, and
  since the operations performed by a Table are relatively inexpensive compared
  to operations on the actual data, using multiple tables referencing the same
  data is encouraged over replicating data.
"""
def __init__(self,
tables: Optional[Sequence[Table]] = None,
port: Optional[int] = None,
checkpointer: Optional[checkpointers.CheckpointerBase] = None):
"""Constructor of Server serving the ReverbService.
Args:
tables: A sequence of tables to host on the server.
port: The port number to serve the gRPC-service on. If `None` (default)
then a port is automatically picked and assigned.
checkpointer: Checkpointer used for storing/loading checkpoints. If None
(default) then `checkpointers.default_checkpointer` is used to
construct the checkpointer.
Raises:
ValueError: If tables is empty.
ValueError: If multiple Table in tables share names.
"""
if not tables:
raise ValueError('At least one table must be provided')
names = collections.Counter(table.name for table in tables)
duplicates = [name for name, count in names.items() if count > 1]
if duplicates:
raise ValueError('Multiple items in tables have the same name: {}'.format(
', '.join(duplicates)))
if port is None:
port = portpicker.pick_unused_port()
if checkpointer is None:
checkpointer = checkpointers.default_checkpointer()
self._server = pybind.Server([table.internal_table for table in tables],
port, checkpointer.internal_checkpointer())
self._port = port
def __del__(self):
"""Stop server and free up the port if was reserved through portpicker."""
if hasattr(self, '_server'):
self.stop()
if hasattr(self, '_port'):
portpicker.return_port(self._port)
def __repr__(self) -> str:
return repr(self._server)
@property
def port(self) -> int:
"""Port the gRPC service is running at."""
return self._port
def stop(self):
"""Request that the ReverbService is terminated and wait for shutdown."""
return self._server.Stop()
def wait(self):
"""Blocks until the service is shut down.
This method will never return unless the server is shut down which will only
happen if:
* `Server.stop` is called by another thread.
* A KeyboardInterrupt is raised (i.e. a SIGINT signal is sent to the
process).
Raises:
KeyboardInterrupt: If the server was killed by a SIGINT.
"""
if self._server.Wait():
raise KeyboardInterrupt
def localhost_client(self) -> client.Client:
"""Creates a client connect to the localhost channel."""
return client.Client(f'localhost:{self._port}')
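# A minimal end-to-end sketch (not part of the original module); the table and
# variable names are illustrative only:
#
#   server = Server(tables=[Table.queue(name='fifo_queue', max_size=100)])
#   local_client = server.localhost_client()
#   ...
#   server.stop()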
| {
"content_hash": "892e751f28861531c064945d2a49c956",
"timestamp": "",
"source": "github",
"line_count": 377,
"max_line_length": 80,
"avg_line_length": 38.87267904509284,
"alnum_prop": 0.6841351074718526,
"repo_name": "deepmind/reverb",
"id": "28a474b1552c4d119335ef9cd3698921bdecd707",
"size": "15250",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "reverb/server.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "3939"
},
{
"name": "C++",
"bytes": "1169710"
},
{
"name": "Dockerfile",
"bytes": "3814"
},
{
"name": "Python",
"bytes": "292531"
},
{
"name": "Shell",
"bytes": "10124"
},
{
"name": "Starlark",
"bytes": "83075"
}
],
"symlink_target": ""
} |
"""
Nevow support for lore. DEPRECATED.
Don't use this module, it will be removed in the release of Twisted
after 2.3. If you want static templating with Nevow, instantiate a
rend.Page() and call renderString (or renderSynchronously) yourself.
Do something like::
lore -inevow --config pageclass=some.module.SomePageSubclass [other-opts]
Maintainer: Christopher Armstrong
"""
import warnings
warnings.warn("twisted.lore.nevowlore is deprecated. Please instantiate "
"rend.Page and call renderString or renderSynchronously "
"yourself.", DeprecationWarning, stacklevel=2)
import os
from twisted.web import microdom
from twisted.python import reflect
from twisted.web import sux
from twisted.lore import default, tree, process
from nevow import loaders
def parseStringAndReport(s):
try:
return microdom.parseString(s)
except microdom.MismatchedTags, e:
raise process.ProcessingFailure(
"%s:%s: begin mismatched tags <%s>/</%s>" %
(e.begLine, e.begCol, e.got, e.expect),
"%s:%s: end mismatched tags <%s>/</%s>" %
(e.endLine, e.endCol, e.got, e.expect))
except microdom.ParseError, e:
raise process.ProcessingFailure("%s:%s:%s" % (e.line, e.col, e.message))
except IOError, e:
raise process.ProcessingFailure(e.strerror)
def ____wait(d):
"."
from twisted.internet import reactor
from twisted.python import failure
l = []
d.addBoth(l.append)
while not l:
reactor.iterate()
if isinstance(l[0], failure.Failure):
l[0].raiseException()
return l[0]
def nevowify(filename, linkrel, ext, url, templ, options=None, outfileGenerator=tree.getOutputFileName):
if options is None:
options = {}
pclass = options['pageclass']
pclass = reflect.namedObject(pclass)
page = pclass(docFactory=loaders.htmlfile(filename))
s = page.renderString()
s = ____wait(s)
newFilename = outfileGenerator(filename, ext)
if options.has_key('nolore'):
f = open(newFilename, 'w')
f.write(s)
f.close()
return
doc = parseStringAndReport(s)
clonedNode = templ.cloneNode(1)
tree.munge(doc, clonedNode, linkrel, os.path.dirname(filename), filename, ext,
url, options, outfileGenerator)
tree.makeSureDirectoryExists(newFilename)
f = open(newFilename, 'wb')
clonedNode.writexml(f)
f.close()
class NevowProcessorFactory:
def getDoFile(self):
return nevowify
def generate_html(self, options, filenameGenerator=tree.getOutputFileName):
n = default.htmlDefault.copy()
n.update(options)
options = n
try:
fp = open(options['template'])
templ = microdom.parse(fp)
except IOError, e:
raise process.NoProcessorError(e.filename+": "+e.strerror)
except sux.ParseError, e:
raise process.NoProcessorError(str(e))
df = lambda file, linkrel: self.getDoFile()(file, linkrel, options['ext'],
options['baseurl'], templ, options, filenameGenerator)
return df
factory = NevowProcessorFactory()
| {
"content_hash": "94ea712326cc6c0dcd2b9eebb399206e",
"timestamp": "",
"source": "github",
"line_count": 107,
"max_line_length": 106,
"avg_line_length": 30.233644859813083,
"alnum_prop": 0.6460587326120556,
"repo_name": "hortonworks/hortonworks-sandbox",
"id": "a30ea1a6a72cc4b125948090f1723c933005c89d",
"size": "3321",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "desktop/core/ext-py/Twisted/twisted/lore/nevowlore.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ActionScript",
"bytes": "27264"
},
{
"name": "Assembly",
"bytes": "207947"
},
{
"name": "C",
"bytes": "10279874"
},
{
"name": "C++",
"bytes": "208068"
},
{
"name": "CSS",
"bytes": "356769"
},
{
"name": "Emacs Lisp",
"bytes": "3171"
},
{
"name": "Java",
"bytes": "3064179"
},
{
"name": "JavaScript",
"bytes": "1532806"
},
{
"name": "PHP",
"bytes": "4160"
},
{
"name": "Perl",
"bytes": "139518"
},
{
"name": "Python",
"bytes": "27735073"
},
{
"name": "R",
"bytes": "12290"
},
{
"name": "Ruby",
"bytes": "5050"
},
{
"name": "Shell",
"bytes": "42062"
},
{
"name": "XSLT",
"bytes": "585"
}
],
"symlink_target": ""
} |
'''
./tdd_suite.py test; red_green_bar.py $? $COLUMNS
'''
import sys
import unittest
class TestSimpleExample(unittest.TestCase):
def test_simple_example(self):
'''
TestSimpleExample:
'''
self.assertEqual(0, 1)
fast_test_ls = [
TestSimpleExample,
]
slow_test_ls = [
TestSimpleExample,
]
def add_all_fast(suite):
for one_test in fast_test_ls:
suite.addTest(unittest.makeSuite(one_test))
def add_all_slow(suite):
for one_test in slow_test_ls:
suite.addTest(unittest.makeSuite(one_test))
def summary_status(suite):
text_test_result = unittest.TextTestRunner().run(suite)
return not not (text_test_result.failures or text_test_result.errors)
def perform_tests():
suite = unittest.TestSuite()
add_all_fast(suite)
return summary_status(suite)
def perform_slow_tests():
suite = unittest.TestSuite()
add_all_fast(suite)
add_all_slow(suite)
return summary_status(suite)
if __name__ == '__main__':
if len(sys.argv) >= 2 and sys.argv[1] == 'slowtest':
result = perform_slow_tests()
else:
result = perform_tests()
sys.exit(result)
| {
"content_hash": "ad9722eff39f65b4776909fc71f71170",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 73,
"avg_line_length": 19.616666666666667,
"alnum_prop": 0.637213254035684,
"repo_name": "kwadrat/rgb_tdd",
"id": "7b01ddf7038dedb44879a2f9847d2a14e3762426",
"size": "1225",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tdd_suite.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3881"
}
],
"symlink_target": ""
} |
"""Utility methods for evaluation of trained models."""
from typing import Sequence, Tuple
import jax
import jax.numpy as jnp
from jax_cfd.data import xarray_utils as xr_utils
import numpy as np
import xarray
# pytype complains about valid operations with xarray (e.g., see b/153704639),
# so it isn't worth the trouble of running it.
# pytype: skip-file
def absolute_error(
array: xarray.DataArray,
eval_model_name: str = 'learned',
target_model_name: str = 'ground_truth',
) -> xarray.DataArray:
"""Computes absolute error between to be evaluated and target models.
Args:
array: xarray.DataArray that contains model dimension with `eval_model_name`
and `target_model_name` coordinates.
eval_model_name: name of the model that is being evaluated.
target_model_name: name of the model representing the ground truth values.
Returns:
xarray.DataArray containing absolute value of errors between
`eval_model_name` and `target_model_name` models.
"""
predicted = array.sel(model=eval_model_name)
target = array.sel(model=target_model_name)
return abs(predicted - target).rename('_'.join([predicted.name, 'error']))
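# A tiny illustrative call (not from the original module): the helpers in this
# file expect a 'model' dimension carrying both coordinate labels, e.g.
#
#   arr = xarray.DataArray(
#       np.zeros((2, 4)),
#       dims=('model', 'x'),
#       coords={'model': ['learned', 'ground_truth']},
#       name='u')
#   err = absolute_error(arr)   # DataArray named 'u_error'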
def state_correlation(
array: xarray.DataArray,
eval_model_name: str = 'learned',
target_model_name: str = 'ground_truth',
non_state_dims: Tuple[str, ...] = (xr_utils.XR_SAMPLE_NAME,
xr_utils.XR_TIME_NAME),
non_state_dims_to_average: Tuple[str, ...] = (xr_utils.XR_SAMPLE_NAME,),
) -> xarray.DataArray:
"""Computes normalized correlation of `array` between target and eval models.
  The dimensions of the `array` are expected to consist of state dimensions
that are interpreted as a vector parametrizing the configuration of the system
and `non_state_dims`, that optionally are averaged over if included in
`non_state_dims_to_average`.
Args:
array: xarray.DataArray that contains model dimension with `eval_model_name`
and `target_model_name` coordinates.
eval_model_name: name of the model that is being evaluated.
target_model_name: name of the model representing the ground truth values.
non_state_dims: tuple of dimension names that are not a part of the state.
non_state_dims_to_average: tuple of `non_state_dims` to average over.
Returns:
xarray.DataArray containing normalized correlation between `eval_model_name`
and `target_model_name` models.
"""
predicted = array.sel(model=eval_model_name)
target = array.sel(model=target_model_name)
state_dims = list(set(predicted.dims) - set(non_state_dims))
predicted = xr_utils.normalize(predicted, state_dims)
target = xr_utils.normalize(target, state_dims)
result = (predicted * target).sum(state_dims).mean(non_state_dims_to_average)
return result.rename('_'.join([array.name, 'correlation']))
def approximate_quantiles(ds, quantile_thresholds):
"""Approximate quantiles of all arrays in the given xarray.Dataset."""
# quantiles can't be done in a blocked fashion in the current version of dask,
# so for now select only the first time step and create a single chunk for
# each array.
return ds.isel(time=0).chunk(-1).quantile(q=quantile_thresholds)
def below_error_threshold(
array: xarray.DataArray,
threshold: xarray.DataArray,
eval_model_name: str = 'learned',
target_model_name: str = 'ground_truth',
) -> xarray.DataArray:
"""Compute if eval model error is below a threshold based on the target."""
predicted = array.sel(model=eval_model_name)
target = array.sel(model=target_model_name)
return abs(predicted - target) <= threshold
def average(
array: xarray.DataArray,
ndim: int,
non_spatial_dims: Tuple[str, ...] = (xr_utils.XR_SAMPLE_NAME,)
) -> xarray.DataArray:
"""Computes spatial and `non_spatial_dims` mean over `array`.
Since complex values are not supported in netcdf format we currently check if
imaginary part can be discarded, otherwise an error is raised.
Args:
array: xarray.DataArray to take a mean of. Expected to have `ndim` spatial
dimensions with names as in `xr_utils.XR_SPATIAL_DIMS`.
ndim: number of spatial dimensions.
non_spatial_dims: tuple of dimension names to average besides space.
Returns:
xarray.DataArray with `ndim` spatial dimensions and `non_spatial_dims`
reduced to mean values.
Raises:
ValueError: if `array` contains non-real imaginary values.
"""
dims = list(non_spatial_dims) + list(xr_utils.XR_SPATIAL_DIMS[:ndim])
dims = [dim for dim in dims if dim in array.dims]
mean_values = array.mean(dims)
if np.iscomplexobj(mean_values):
raise ValueError('complex values are not supported.')
return mean_values
def energy_spectrum_metric(threshold=0.01):
"""Computes an energy spectrum metric that checks if a simulation failed."""
@jax.jit
def _energy_spectrum_metric(arr, ground_truth):
diff = jnp.abs(jnp.log(arr) - jnp.log(ground_truth))
metric = jnp.sum(jnp.where(ground_truth > threshold, diff, 0), axis=-1)
cutoff = jnp.sum(
jnp.where((arr > threshold) & (ground_truth < threshold),
jnp.abs(jnp.log(arr)), 0),
axis=-1)
return metric + cutoff
energy_spectrum_ds = lambda a, b: xarray.apply_ufunc( # pylint: disable=g-long-lambda
_energy_spectrum_metric, a, b, input_core_dims=[['kx'], ['kx']]).mean(
dim='sample')
return energy_spectrum_ds
def u_x_correlation_metric(threshold=0.5):
"""Computes a spacial spectrum metric that checks if a simulation failed."""
@jax.jit
def _u_x_correlation_metric(arr, ground_truth):
diff = (jnp.abs(arr - ground_truth))
metric = jnp.sum(
jnp.where(jnp.abs(ground_truth) > threshold, diff, 0), axis=-1)
cutoff = jnp.sum(
jnp.where(
(jnp.abs(arr) > threshold) & (jnp.abs(ground_truth) < threshold),
jnp.abs(arr), 0),
axis=-1)
return metric + cutoff
u_x_correlation_ds = lambda a, b: xarray.apply_ufunc( # pylint: disable=g-long-lambda
_u_x_correlation_metric, a, b, input_core_dims=[['dx'], ['dx']]).mean(
dim='sample')
return u_x_correlation_ds
def temporal_autocorrelation(array):
"""Computes temporal autocorrelation of array."""
dt = array['time'][1] - array['time'][0]
length = array.sizes['time']
subsample = max(1, int(1. / dt))
def _autocorrelation(array):
def _corr(x, d):
del x
arr1 = jnp.roll(array, d, 0)
ans = arr1 * array
ans = jnp.sum(
jnp.where(
jnp.arange(length).reshape(-1, 1, 1, 1) >= d, ans / length, 0),
axis=0)
return d, ans
_, full_result = jax.lax.scan(_corr, 0, jnp.arange(0, length, subsample))
return full_result
full_result = jax.jit(_autocorrelation)(
jnp.array(array.transpose('time', 'sample', 'x', 'model').u))
full_result = xarray.Dataset(
data_vars=dict(t_corr=(['time', 'sample', 'x', 'model'], full_result)),
coords={
'dt': np.array(array.time[slice(None, None, subsample)]),
'sample': array.sample,
'x': array.x,
'model': array.model
})
return full_result
def u_t_correlation_metric(threshold=0.5):
"""Computes a temporal spectrum metric that checks if a simulation failed."""
@jax.jit
def _u_t_correlation_metric(arr, ground_truth):
diff = (jnp.abs(arr - ground_truth))
metric = jnp.sum(
jnp.where(jnp.abs(ground_truth) > threshold, diff, 0), axis=-1)
cutoff = jnp.sum(
jnp.where(
(jnp.abs(arr) > threshold) & (jnp.abs(ground_truth) < threshold),
jnp.abs(arr), 0),
axis=-1)
return jnp.mean(metric + cutoff)
return _u_t_correlation_metric
def compute_summary_dataset(
model_ds: xarray.Dataset,
ground_truth_ds: xarray.Dataset,
quantile_thresholds: Sequence[float] = (0.1, 1.0),
non_spatial_dims: Tuple[str, ...] = (xr_utils.XR_SAMPLE_NAME,)
) -> xarray.Dataset:
"""Computes sample and space averaged summaries of predictions and errors.
Args:
model_ds: dataset containing trajectories unrolled using the model.
ground_truth_ds: dataset containing ground truth trajectories.
quantile_thresholds: quantile thresholds to use for "within error" metrics.
non_spatial_dims: tuple of dimension names to average besides space.
Returns:
xarray.Dataset containing observables and absolute value errors
averaged over sample and spatial dimensions.
"""
ndim = ground_truth_ds.attrs['ndim']
eval_model_name = 'eval_model'
target_model_name = 'ground_truth'
combined_dataset = xarray.concat([model_ds, ground_truth_ds], dim='model')
combined_dataset.coords['model'] = [eval_model_name, target_model_name]
combined_dataset = combined_dataset.sel(time=slice(None, 500))
summaries = [combined_dataset[u] for u in xr_utils.XR_VELOCITY_NAMES[:ndim]]
spectrum = xr_utils.energy_spectrum(combined_dataset).rename(
'energy_spectrum')
summaries += [
xr_utils.kinetic_energy(combined_dataset),
xr_utils.speed(combined_dataset),
spectrum,
]
# TODO(dkochkov) Check correlations in NS and enable it for 2d and 3d.
if ndim == 1:
correlations = xr_utils.velocity_spatial_correlation(combined_dataset, 'x')
time_correlations = temporal_autocorrelation(combined_dataset)
summaries += [correlations[variable] for variable in correlations]
u_x_corr_sum = [
xarray.DataArray((u_x_correlation_metric(threshold)( # pylint: disable=g-complex-comprehension
correlations.sel(model=eval_model_name),
correlations.sel(model=target_model_name))).u_x_correlation)
for threshold in [0.5]
]
if not time_correlations.t_corr.isnull().any():
# autocorrelation is a constant, so it is expanded to be part of summaries
u_t_corr_sum = [
xarray.ones_like(u_x_corr_sum[0]).rename('autocorrelation') * # pylint: disable=g-complex-comprehension
u_t_correlation_metric(threshold)(
jnp.array(time_correlations.t_corr.sel(model=eval_model_name)),
jnp.array(time_correlations.t_corr.sel(model=target_model_name)))
for threshold in [0.5]
]
else:
# if the trajectory goes to nan, it just reports a large number
u_t_corr_sum = [
xarray.ones_like(u_x_corr_sum[0]).rename('autocorrelation') * np.infty
for threshold in [0.5]
]
energy_sum = [
energy_spectrum_metric(threshold)( # pylint: disable=g-complex-comprehension
spectrum.sel(model=eval_model_name, kx=slice(0, spectrum.kx.max())),
spectrum.sel(
model=target_model_name,
kx=slice(0, spectrum.kx.max()))).rename('energy_spectrum_%f' %
threshold)
for threshold in [0.001, 0.01, 0.1, 1.0, 10]
] # pylint: disable=g-complex-comprehension
custom_summaries = u_x_corr_sum + energy_sum + u_t_corr_sum
if ndim == 2:
summaries += [
xr_utils.enstrophy_2d(combined_dataset),
xr_utils.vorticity_2d(combined_dataset),
xr_utils.isotropic_energy_spectrum(
combined_dataset,
average_dims=non_spatial_dims).rename('energy_spectrum')
]
if ndim >= 2:
custom_summaries = []
mean_summaries = [
average(s.sel(model=eval_model_name), ndim).rename(s.name + '_mean')
for s in summaries
]
error_summaries = [
average(absolute_error(s, eval_model_name, target_model_name), ndim)
for s in summaries
]
correlation_summaries = [
state_correlation(s, eval_model_name, target_model_name)
for s in summaries
if s.name in xr_utils.XR_VELOCITY_NAMES + ('vorticity',)
]
summaries_ds = xarray.Dataset({array.name: array for array in summaries})
thresholds = approximate_quantiles(
summaries_ds, quantile_thresholds=quantile_thresholds).compute()
threshold_summaries = []
for threshold_quantile in quantile_thresholds:
for summary in summaries:
name = summary.name
error_threshold = thresholds[name].sel(
quantile=threshold_quantile, drop=True)
below_error = below_error_threshold(summary, error_threshold,
eval_model_name, target_model_name)
below_error.name = f'{name}_within_q={threshold_quantile}'
threshold_summaries.append(average(below_error, ndim))
all_summaries = (
mean_summaries + error_summaries + threshold_summaries +
correlation_summaries + custom_summaries)
return xarray.Dataset({array.name: array for array in all_summaries})
| {
"content_hash": "51b7ea753189ab8665d36aa048e3e3ca",
"timestamp": "",
"source": "github",
"line_count": 325,
"max_line_length": 115,
"avg_line_length": 38.94769230769231,
"alnum_prop": 0.666218991941855,
"repo_name": "google/jax-cfd",
"id": "a4a2925ff0306b14485d190dcf43da7caeddbd7d",
"size": "13233",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "jax_cfd/data/evaluation.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "7029140"
},
{
"name": "Python",
"bytes": "715552"
}
],
"symlink_target": ""
} |
"""
common.py
This module contains the model to persist and then
retrieve data from backends. It is intended to be
a common structure for all parsed sources.
"""
__author__ = 'Alan Barber'
#python
from collections import namedtuple
Common = namedtuple(
'Common',
[
"type",
"data",
"hash",
"tags",
"linked"
])
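# A minimal usage sketch (not part of the original module); the field values
# shown here are illustrative only:
#
#   record = Common(
#       type="note",
#       data={"text": "hello"},
#       hash="abc123",
#       tags=["example"],
#       linked=[])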
| {
"content_hash": "ba6ead49bf058919c0ae7aa4aa12d081",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 50,
"avg_line_length": 16.681818181818183,
"alnum_prop": 0.6076294277929155,
"repo_name": "alanebarber/sabroso",
"id": "efc218b2024afee3e3d4195b7b42446ca7ac7648",
"size": "367",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/application/data_layer/models/common.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "201"
},
{
"name": "JavaScript",
"bytes": "2251"
},
{
"name": "Python",
"bytes": "16814"
}
],
"symlink_target": ""
} |
"""
Main module of the foraging ant simulation.
"""
from cab.complex_automaton import ComplexAutomaton
from abm.fa_agent import HiveAgent
from ca.fa_cell import WorldCell
from fa_global_constants import GC
# from util.fa_visualization import Visualizer
__author__ = 'Michael Wagner'
__version__ = '1.0'
if __name__ == '__main__':
gc = GC()
pc = WorldCell(0, 0, gc)
pa = HiveAgent(0, 0, gc)
# pv = Visualizer(gc, None)
# Use assets to initialize simulation system.
# simulation = ComplexAutomaton(gc, proto_cell=pc, proto_agent=pa, proto_handler=ph, proto_visualizer=pv)
simulation = ComplexAutomaton(gc, proto_cell=pc)
# Run the simulation
simulation.run_main_loop()
# If need be, the simulation can be run in profiling mode too!
# cProfile.run("simulation.run_main_loop()")
| {
"content_hash": "97019f73f267171459063e0067b6d915",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 109,
"avg_line_length": 25.90625,
"alnum_prop": 0.6863691194209891,
"repo_name": "Micutio/CAB_Simulations",
"id": "e6cf3c7c91e0e9793924a77180926ae4a3cb1434",
"size": "829",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ForagingAnt/fa_main.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "119411"
}
],
"symlink_target": ""
} |
import csv
from django import http
from django.conf import settings
from django.contrib import admin, messages
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db.models import Q
from django.shortcuts import redirect, render
from django.views import debug
import commonware.log
import elasticsearch
import jinja2
import mkt
from mkt.developers.models import ActivityLog
from mkt.prices.utils import update_from_csv
from mkt.site.decorators import permission_required
from mkt.site.mail import FakeEmailBackend
from mkt.site.utils import chunked
from mkt.users.models import UserProfile
from mkt.webapps.models import WebappUser, Webapp
from mkt.webapps.tasks import update_manifests
from . import tasks
from .decorators import admin_required
from .forms import DevMailerForm, GenerateErrorForm, PriceTiersForm, YesImSure
from .models import EmailPreviewTopic
log = commonware.log.getLogger('z.zadmin')
@admin_required
def show_settings(request):
settings_dict = debug.get_safe_settings()
for i in ['GOOGLE_ANALYTICS_CREDENTIALS']:
settings_dict[i] = debug.cleanse_setting(i,
getattr(settings, i, {}))
settings_dict['WEBAPPS_RECEIPT_KEY'] = '********************'
return render(request, 'zadmin/settings.html',
{'settings_dict': settings_dict})
@admin_required
def env(request):
return http.HttpResponse(u'<pre>%s</pre>' % (jinja2.escape(request)))
@admin_required
def email_preview_csv(request, topic):
resp = http.HttpResponse()
resp['Content-Type'] = 'text/csv; charset=utf-8'
resp['Content-Disposition'] = "attachment; filename=%s.csv" % (topic)
writer = csv.writer(resp)
fields = ['from_email', 'recipient_list', 'subject', 'body']
writer.writerow(fields)
rs = EmailPreviewTopic(topic=topic).filter().values_list(*fields)
for row in rs:
writer.writerow([r.encode('utf8') for r in row])
return resp
@admin.site.admin_view
def mail(request):
backend = FakeEmailBackend()
if request.method == 'POST':
backend.clear()
return redirect('zadmin.mail')
return render(request, 'zadmin/mail.html', dict(mail=backend.view_all()))
@admin.site.admin_view
def email_devs(request):
form = DevMailerForm(request.POST or None)
preview = EmailPreviewTopic(topic='email-devs')
if preview.filter().count():
preview_csv = reverse('zadmin.email_preview_csv',
args=[preview.topic])
else:
preview_csv = None
if request.method == 'POST' and form.is_valid():
data = form.cleaned_data
qs = (WebappUser.objects.filter(role__in=(mkt.AUTHOR_ROLE_DEV,
mkt.AUTHOR_ROLE_OWNER))
.exclude(user__email=None))
if data['recipients'] in ('payments', 'desktop_apps'):
qs = qs.exclude(webapp__status=mkt.STATUS_DELETED)
else:
qs = qs.filter(webapp__status__in=mkt.LISTED_STATUSES)
if data['recipients'] in ('payments', 'payments_region_enabled',
'payments_region_disabled'):
qs = qs.exclude(webapp__premium_type__in=(mkt.WEBAPP_FREE,
mkt.WEBAPP_OTHER_INAPP))
if data['recipients'] == 'payments_region_enabled':
qs = qs.filter(webapp__enable_new_regions=True)
elif data['recipients'] == 'payments_region_disabled':
qs = qs.filter(webapp__enable_new_regions=False)
elif data['recipients'] in ('apps', 'free_apps_region_enabled',
'free_apps_region_disabled'):
if data['recipients'] == 'free_apps_region_enabled':
qs = qs.filter(webapp__enable_new_regions=True)
elif data['recipients'] == 'free_apps_region_disabled':
qs = qs.filter(webapp__enable_new_regions=False)
elif data['recipients'] == 'desktop_apps':
qs = (qs.filter(
webapp__webappdevicetype__device_type=mkt.DEVICE_DESKTOP.id))
else:
raise NotImplementedError('If you want to support emailing other '
'types of developers, do it here!')
if data['preview_only']:
# Clear out the last batch of previewed emails.
preview.filter().delete()
total = 0
for emails in chunked(set(qs.values_list('user__email', flat=True)),
100):
total += len(emails)
tasks.admin_email.delay(emails, data['subject'], data['message'],
preview_only=data['preview_only'],
preview_topic=preview.topic)
msg = 'Emails queued for delivery: %s' % total
if data['preview_only']:
msg = '%s (for preview only, emails not sent!)' % msg
messages.success(request, msg)
return redirect('zadmin.email_devs')
return render(request, 'zadmin/email-devs.html',
dict(form=form, preview_csv=preview_csv))
@permission_required([('Admin', '%'),
('AdminTools', 'View'),
('ReviewerAdminTools', 'View')])
def index(request):
log = ActivityLog.objects.admin_events()[:5]
return render(request, 'zadmin/index.html', {'log': log})
@admin.site.admin_view
def memcache(request):
form = YesImSure(request.POST or None)
if form.is_valid() and form.cleaned_data['yes']:
cache.clear()
form = YesImSure()
messages.success(request, 'Cache cleared')
if cache._cache and hasattr(cache._cache, 'get_stats'):
stats = cache._cache.get_stats()
else:
stats = []
return render(request, 'zadmin/memcache.html',
{'form': form, 'stats': stats})
@admin_required
def generate_error(request):
form = GenerateErrorForm(request.POST or None)
if request.method == 'POST' and form.is_valid():
form.explode()
return render(request, 'zadmin/generate-error.html', {'form': form})
@permission_required([('Admin', '%'),
('MailingLists', 'View')])
def export_email_addresses(request):
return render(request, 'zadmin/export_button.html', {})
@permission_required([('Admin', '%'),
('MailingLists', 'View')])
def email_addresses_file(request):
resp = http.HttpResponse()
resp['Content-Type'] = 'text/plain; charset=utf-8'
resp['Content-Disposition'] = ('attachment; '
'filename=amo_optin_emails.txt')
emails = (UserProfile.objects.filter(notifications__notification_id=13,
notifications__enabled=1)
.values_list('email', flat=True))
for e in emails:
if e is not None:
resp.write(e + '\n')
return resp
@admin_required
def price_tiers(request):
output = []
form = PriceTiersForm(request.POST or None, request.FILES)
if request.method == 'POST' and form.is_valid():
output = update_from_csv(form.cleaned_data['prices'])
return render(request, 'zadmin/update-prices.html',
{'result': output, 'form': form})
@admin_required(reviewers=True)
def manifest_revalidation(request):
if request.method == 'POST':
# Collect the apps to revalidate.
qs = Q(is_packaged=False, status=mkt.STATUS_PUBLIC,
disabled_by_user=False)
webapp_pks = Webapp.objects.filter(qs).values_list('pk', flat=True)
for pks in chunked(webapp_pks, 100):
update_manifests.delay(list(pks), check_hash=False)
messages.success(request, 'Manifest revalidation queued')
return render(request, 'zadmin/manifest.html')
@admin_required
def elastic(request):
es = elasticsearch.Elasticsearch(hosts=settings.ES_HOSTS)
indexes = set(settings.ES_INDEXES.values())
ctx = {
'aliases': es.indices.get_aliases(),
'health': es.cluster.health(),
'state': es.cluster.state(),
'mappings': [(index, es.indices.get_mapping(index=index))
for index in indexes],
}
return render(request, 'zadmin/elastic.html', ctx)
| {
"content_hash": "6318af154ec4d99baf586592147c3c46",
"timestamp": "",
"source": "github",
"line_count": 228,
"max_line_length": 78,
"avg_line_length": 36.574561403508774,
"alnum_prop": 0.6091857536874925,
"repo_name": "shahbaz17/zamboni",
"id": "4ebbf410ef114b0f84d8fb0fc0b36a0285713958",
"size": "8339",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mkt/zadmin/views.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "357511"
},
{
"name": "HTML",
"bytes": "2331440"
},
{
"name": "JavaScript",
"bytes": "536153"
},
{
"name": "Makefile",
"bytes": "4281"
},
{
"name": "Python",
"bytes": "4400945"
},
{
"name": "Shell",
"bytes": "11200"
},
{
"name": "Smarty",
"bytes": "1159"
}
],
"symlink_target": ""
} |
from __future__ import division
import logging
import time
from quant import config
from quant.brokers import broker_factory
from quant.common import constant
from quant.tool import email_box
from .basicbot import BasicBot
class Maker_ZRX(BasicBot):
"""
./venv/bin/python -m quant.cli -mBitfinex_ZRX_ETH,Binance_ZRX_ETH -oMaker_ZRX -f=maker_zrx -v
"""
def __init__(self):
super(Maker_ZRX, self).__init__()
self.mm_market = 'Bitfinex_ZRX_ETH'
self.hedge_market = 'Binance_ZRX_ETH'
self.brokers = broker_factory.create_brokers([self.mm_market, self.hedge_market])
self.mm_broker = self.brokers[self.mm_market]
self.hedge_broker = self.brokers[self.hedge_market]
self.local_order = {}
self.data_lost_count = 0
self.risk_protect_count = 10
self.fee_hedge_market = 0.002
self.fee_mm_market = 0.002
self.MIN_PRICE_DIFF = 0.02
self.PLACE_AMOUNT_INIT = 10
        # slippage factor applied when adjusting the maker price
self.SLID_PRICE = 0.005
self.tick_count = 0
self.last_update_min_stock = 0.0
# Just for bfx
self.min_amount_trade = 6
self.cancel_orders(self.mm_market)
logging.info('liquid_zrx======>Setup complete')
def terminate(self):
super(Maker_ZRX, self).terminate()
self.cancel_orders(self.mm_market)
logging.info('liquid_zrx======>terminate complete')
@classmethod
def cal_hedge_amount_min(cls, price):
return 0.01 / price
def cal_mm_buy_price(self, hedge_bid_price, mm_bid_price):
price = hedge_bid_price * (1 - self.MIN_PRICE_DIFF)
if price > mm_bid_price:
price = round(mm_bid_price * (1 + self.SLID_PRICE), 8)
return price
def cal_handle_amount(self):
return self.local_order['deal_amount'] - self.local_order['hedge_amount']
def risk_protect(self):
self.data_lost_count += 1
if self.data_lost_count > self.risk_protect_count:
logging.warn('liquid_zrx======>risk protect~stop liquid supply. %s' % self.data_lost_count)
self.cancel_orders(self.mm_market)
self.data_lost_count = 0
def update_min_stock(self):
        # Refresh the Bitfinex minimum trade size, at most once per hour
now = time.time()
diff = now - self.last_update_min_stock
if diff > 3600:
min_stock = self.brokers[self.mm_market].get_min_stock()
if min_stock:
self.min_amount_trade = min_stock
self.last_update_min_stock = now
def update_other(self):
self.update_min_stock()
def tick(self, depths):
try:
mm_bid_price, mm_ask_price = self.get_ticker(depths, self.mm_market)
except Exception as e:
logging.debug(e)
return
try:
hedge_bid_price, hedge_ask_price = self.get_ticker(depths, self.hedge_market)
except Exception as e:
logging.debug(e)
self.risk_protect()
return
if not self.local_order:
buy_price = self.cal_mm_buy_price(hedge_bid_price=hedge_bid_price, mm_bid_price=mm_bid_price)
self.place_order(buy_price, self.PLACE_AMOUNT_INIT)
return
order_id = self.local_order['order_id']
logging.info('liquid_zrx======>update local order: %s' % order_id)
# update local order
self.update_order()
# all float type
buy_price = self.cal_mm_buy_price(hedge_bid_price=hedge_bid_price, mm_bid_price=mm_bid_price)
handle_amount = self.cal_handle_amount()
hedge_amount_min = self.cal_hedge_amount_min(hedge_bid_price)
if handle_amount < hedge_amount_min:
order_price = self.local_order['price']
if order_price <= buy_price:
return
logging.info('liquid_zrx======>cancel order: %s, because order_price: %s < buy_price: %s' %
(order_id, order_price, buy_price))
self.cancel_flow(buy_price)
else:
self.hedge_order(handle_amount, hedge_bid_price)
if self.local_order['status'] == constant.ORDER_STATE_PENDING:
self.cancel_flow(buy_price)
def cancel_flow(self, buy_price):
order_id = self.local_order['order_id']
# current order price is not good, so cancel and place again
cancel_res = self.mm_broker.cancel_order(order_id=self.local_order['order_id'])
if not cancel_res:
# cancel failed, just return
logging.info('liquid_zrx======>cancel order: %s failed' % order_id)
return
logging.info('liquid_zrx======>cancel order: %s success' % order_id)
# delete local order
self.local_order = {}
# place new order
self.place_order(buy_price, self.PLACE_AMOUNT_INIT)
def update_order(self):
# update local order
order_id = self.local_order['order_id']
error_count = 0
while True:
resp = self.mm_broker.get_order(order_id=order_id)
if resp:
self.local_order['deal_amount'] = resp['deal_amount']
self.local_order['avg_price'] = resp['avg_price']
self.local_order['status'] = resp['status']
break
error_count += 1
if error_count >= 10:
logging.error("liquid_zrx======>update_order failed more than 10 times")
raise Exception("liquid_zrx======>update_order failed more than 10 times")
time.sleep(config.INTERVAL_RETRY)
def place_order(self, buy_price, buy_amount):
eth_num = self.mm_broker.eth_available
can_buy_max = eth_num / buy_price
buy_amount = min(buy_amount, can_buy_max)
if buy_amount < self.min_amount_trade:
logging.error('liquid_zrx======>buy failed because %s < %s, maybe bfx eth is not enough' %
(eth_num, self.min_amount_trade))
raise Exception('liquid_zrx======>buy failed because %s < %s, maybe bfx eth is not enough' %
(eth_num, self.min_amount_trade))
try:
order_id = self.mm_broker.buy_limit_c(price=buy_price, amount=buy_amount)
except Exception as e:
logging.error("liquid_zrx======>place_order failed, exception: %s" % e)
email_box.send_mail("liquid_zrx======>place_order failed, exception: %s" % e)
return
if not order_id:
logging.error('liquid_zrx======>place_order failed, because order_id is none,that must not happen')
raise Exception('liquid_zrx======>place_order failed, because order_id is none,that must not happen')
self.local_order = {
'order_id': order_id,
'price': buy_price,
'amount': buy_amount,
'deal_amount': 0,
'hedge_amount': 0,
'type': 'buy',
'status': constant.ORDER_STATE_PENDING,
'time': time.time()
}
logging.info('liquid_zrx======>place_order success, order_id: %s' % order_id)
def hedge_order(self, hedge_amount, hedge_price):
# hedge sell in binance
can_sell_max = self.hedge_broker.zrx_available
sell_amount_limit = self.cal_hedge_amount_min(hedge_price)
if can_sell_max < hedge_amount:
# post email
if can_sell_max < sell_amount_limit:
logging.error('liquid_zrx======>hedge_order failed, because can_sell_max: %s < %s' %
(can_sell_max, sell_amount_limit))
raise Exception('liquid_zrx======>hedge_order failed, because can_sell_max: %s < %s' %
(can_sell_max, sell_amount_limit))
sell_amount = can_sell_max
else:
sell_amount = hedge_amount
sell_price = hedge_price
hedge_index = 0
while True:
try:
order_id = self.hedge_broker.sell_limit_c(amount=sell_amount, price=sell_price)
except Exception as e:
logging.error('liquid_zrx======>hedge sell order failed when sell_limit_c, error=%s' % e)
raise Exception('liquid_zrx======>hedge sell order failed when sell_limit_c, error=%s' % e)
deal_amount, avg_price = self.get_deal_amount(self.hedge_market, order_id)
self.local_order['hedge_amount'] += deal_amount
logging.info("liquid_zrx======>hedge sell %s, order_id=%s, amount=%s, price=%s, deal_amount=%s" %
(hedge_index, order_id, sell_amount, avg_price, deal_amount))
diff_amount = round(sell_amount - deal_amount, 8)
sell_amount_limit = self.cal_hedge_amount_min(sell_price)
if diff_amount < sell_amount_limit:
hedge_amount_current = self.local_order['hedge_amount']
hedge_amount_target = self.local_order['deal_amount']
logging.info('liquid_zrx======>hedge sell order success, target=%s, current=%s' %
(hedge_amount_target, hedge_amount_current))
email_box.send_mail('liquid_zrx======>hedge sell order success, target=%s, current=%s' %
(hedge_amount_target, hedge_amount_current))
break
time.sleep(config.INTERVAL_API)
ticker = self.get_latest_ticker(self.hedge_market)
sell_amount = diff_amount
sell_price = ticker['bid']
hedge_index += 1
@classmethod
def get_ticker(cls, depths, market):
bid_price = depths[market]["bids"][0]['price']
ask_price = depths[market]["asks"][0]['price']
return bid_price, ask_price
def update_balance(self):
self.mm_broker.get_balances_c()
self.hedge_broker.get_balances_c()
def cancel_orders(self, market):
logging.info('liquid_zrx======>cancel zrx orders on bitfinex')
self.brokers[market].cancel_orders()
| {
"content_hash": "c1e3693f17ed92b7d4abfff65e6edbb6",
"timestamp": "",
"source": "github",
"line_count": 256,
"max_line_length": 113,
"avg_line_length": 39.34765625,
"alnum_prop": 0.5718256725900923,
"repo_name": "doubleDragon/QuantBot",
"id": "9d852219328c10e22f183315738722d9f28a85a9",
"size": "10153",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "quant/observers/maker_zrx.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "599075"
}
],
"symlink_target": ""
} |
""" Test behavior of NLHBase. """
from nlhtree import NLHTree
__all__ = [
'NLHBase',
]
class NLHBase(object):
""" Test behavior of NLHBase. """
def __init__(self, name, hashtype):
self._root = NLHTree(name, hashtype) # immutable ref to a NLHTree
self._cur_tree = self._root # the current tree; mutable
self._hashtype = hashtype
@property
def name(self):
""" Return the name of the tree. """
return self._root.name
@property
def hashtype(self):
""" Return which hash type we are using. """
return self._root.hashtype
@property
def root(self):
""" Return the root of the current tree. """
return self._root
@property
def cur_tree(self):
""" Return the current tree. """
return self._cur_tree
@cur_tree.setter
def cur_tree(self, path):
"""
Make the given path the root of the tree.
XXX STUBBED
"""
if not path:
raise RuntimeError('path may not be None or empty')
# needs to handle more complex cases
path = path.strip()
parts = path.split('/') # many possible problems ignored
        if len(parts) > 1:
            raise NotImplementedError("can't handle multi-part paths yet")
# XXX if the path begins with a forward slash ('/'), then
# tentatively set the current tree to the root and then
# apply the normal relpath logic from there
# find a node with this name
# XXX STUB XXX
# if it's a leaf, error
# XXX STUB XXX
# otherwise set cur_tree to point to this node
self._cur_tree = path
| {
"content_hash": "9f90e73f219e0358b4d0328c2df82141",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 24.642857142857142,
"alnum_prop": 0.5611594202898551,
"repo_name": "jddixon/nlhtree_py",
"id": "7ef2142401f427f843adfc5e1440571c86484c8f",
"size": "1755",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/nlhtree/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6648"
},
{
"name": "Python",
"bytes": "95277"
},
{
"name": "Shell",
"bytes": "1859"
}
],
"symlink_target": ""
} |
import logging
import json
import sys
from dlab.fab import *
from dlab.meta_lib import *
from dlab.actions_lib import *
import os
import argparse
import traceback
parser = argparse.ArgumentParser()
parser.add_argument('--uuid', type=str, default='')
args = parser.parse_args()
if __name__ == "__main__":
instance_class = 'notebook'
local_log_filename = "{}_{}_{}.log".format(os.environ['conf_resource'], os.environ['edge_user_name'],
os.environ['request_id'])
local_log_filepath = "/logs/" + os.environ['conf_resource'] + "/" + local_log_filename
logging.basicConfig(format='%(levelname)-8s [%(asctime)s] %(message)s',
level=logging.DEBUG,
filename=local_log_filepath)
notebook_config = dict()
try:
notebook_config['exploratory_name'] = os.environ['exploratory_name']
except:
notebook_config['exploratory_name'] = ''
notebook_config['service_base_name'] = os.environ['conf_service_base_name']
notebook_config['instance_type'] = os.environ['aws_notebook_instance_type']
notebook_config['key_name'] = os.environ['conf_key_name']
notebook_config['user_keyname'] = os.environ['edge_user_name']
notebook_config['instance_name'] = '{}-{}-nb-{}-{}'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'],
notebook_config['exploratory_name'], args.uuid)
notebook_config['expected_image_name'] = '{}-{}-notebook-image'.format(notebook_config['service_base_name'],
os.environ['application'])
notebook_config['notebook_image_name'] = str(os.environ.get('notebook_image_name'))
notebook_config['role_profile_name'] = '{}-{}-nb-de-Profile' \
.format(notebook_config['service_base_name'].lower().replace('-', '_'), os.environ['edge_user_name'])
notebook_config['security_group_name'] = '{}-{}-nb-SG'.format(notebook_config['service_base_name'],
os.environ['edge_user_name'])
notebook_config['tag_name'] = '{}-Tag'.format(notebook_config['service_base_name'])
notebook_config['dlab_ssh_user'] = os.environ['conf_os_user']
notebook_config['shared_image_enabled'] = os.environ['conf_shared_image_enabled']
tag = {"Key": notebook_config['tag_name'],
"Value": "{}-{}-subnet".format(notebook_config['service_base_name'], os.environ['edge_user_name'])}
notebook_config['subnet_cidr'] = get_subnet_by_tag(tag)
# generating variables regarding EDGE proxy on Notebook instance
instance_hostname = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
edge_instance_name = os.environ['conf_service_base_name'] + "-" + os.environ['edge_user_name'] + '-edge'
edge_instance_hostname = get_instance_hostname(notebook_config['tag_name'], edge_instance_name)
edge_instance_ip = get_instance_ip_address(notebook_config['tag_name'], edge_instance_name).get(
'Public')
keyfile_name = "{}{}.pem".format(os.environ['conf_key_dir'], os.environ['conf_key_name'])
try:
if os.environ['conf_os_family'] == 'debian':
initial_user = 'ubuntu'
sudo_group = 'sudo'
if os.environ['conf_os_family'] == 'redhat':
initial_user = 'ec2-user'
sudo_group = 'wheel'
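        # Every provisioning step below follows the same pattern: build a CLI
        # argument string, shell out to a helper script under ~/scripts/ via
        # fabric's local(), and on any failure record the error with
        # append_result(), terminate the EC2 instance and exit.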
logging.info('[CREATING DLAB SSH USER]')
print('[CREATING DLAB SSH USER]')
params = "--hostname {} --keyfile {} --initial_user {} --os_user {} --sudo_group {}".format\
(instance_hostname, os.environ['conf_key_dir'] + os.environ['conf_key_name'] + ".pem", initial_user,
notebook_config['dlab_ssh_user'], sudo_group)
try:
local("~/scripts/{}.py {}".format('create_ssh_user', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed creating ssh user 'dlab'.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# configuring proxy on Notebook instance
try:
logging.info('[CONFIGURE PROXY ON TENSOR INSTANCE]')
print('[CONFIGURE PROXY ON TENSOR INSTANCE]')
additional_config = {"proxy_host": edge_instance_hostname, "proxy_port": "3128"}
params = "--hostname {} --instance_name {} --keyfile {} --additional_config '{}' --os_user {}"\
.format(instance_hostname, notebook_config['instance_name'], keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('common_configure_proxy', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure proxy.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# updating repositories & installing python packages
try:
logging.info('[INSTALLING PREREQUISITES TO TENSOR NOTEBOOK INSTANCE]')
print('[INSTALLING PREREQUISITES TO TENSOR NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} --user {} --region {}".\
format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], os.environ['aws_region'])
try:
local("~/scripts/{}.py {}".format('install_prerequisites', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing apps: apt & pip.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# installing and configuring TensorFlow and all dependencies
try:
logging.info('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
print('[CONFIGURE TENSORFLOW NOTEBOOK INSTANCE]')
params = "--hostname {} --keyfile {} " \
"--region {} --os_user {} " \
"--exploratory_name {}" \
.format(instance_hostname, keyfile_name,
os.environ['aws_region'], notebook_config['dlab_ssh_user'],
notebook_config['exploratory_name'])
try:
local("~/scripts/{}.py {}".format('configure_tensor_node', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to configure TensorFlow.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[INSTALLING USERs KEY]')
logging.info('[INSTALLING USERs KEY]')
additional_config = {"user_keyname": notebook_config['user_keyname'],
"user_keydir": os.environ['conf_key_dir']}
params = "--hostname {} --keyfile {} --additional_config '{}' --user {}".format(
instance_hostname, keyfile_name, json.dumps(additional_config), notebook_config['dlab_ssh_user'])
try:
local("~/scripts/{}.py {}".format('install_user_key', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed installing users key.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP USER GIT CREDENTIALS]')
logging.info('[SETUP USER GIT CREDENTIALS]')
params = '--os_user {} --notebook_ip {} --keyfile "{}"' \
.format(notebook_config['dlab_ssh_user'], instance_hostname, keyfile_name)
try:
local("~/scripts/{}.py {}".format('manage_git_creds', params))
except:
append_result("Failed setup git credentials")
raise Exception
except Exception as err:
append_result("Failed to setup git credentials.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
logging.info('[POST CONFIGURING PROCESS]')
        print('[POST CONFIGURING PROCESS]')
if notebook_config['notebook_image_name'] not in [notebook_config['expected_image_name'], 'None']:
params = "--hostname {} --keyfile {} --os_user {} --nb_tag_name {} --nb_tag_value {}" \
.format(instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'],
notebook_config['tag_name'], notebook_config['instance_name'])
try:
local("~/scripts/{}.py {}".format('common_remove_remote_kernels', params))
except:
traceback.print_exc()
raise Exception
except Exception as err:
append_result("Failed to post configuring instance.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
try:
print('[SETUP EDGE REVERSE PROXY TEMPLATE]')
logging.info('[SETUP EDGE REVERSE PROXY TEMPLATE]')
additional_info = {
'instance_hostname': instance_hostname,
'tensor': True
}
params = "--edge_hostname {} --keyfile {} --os_user {} --type {} --exploratory_name {} --additional_info '{}'"\
.format(edge_instance_hostname, keyfile_name, notebook_config['dlab_ssh_user'], 'jupyter',notebook_config['exploratory_name'], json.dumps(additional_info))
try:
local("~/scripts/{}.py {}".format('common_configure_reverse_proxy', params))
except:
append_result("Failed edge reverse proxy template")
raise Exception
except Exception as err:
append_result("Failed to set edge reverse proxy template.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
if notebook_config['shared_image_enabled'] == 'true':
try:
print('[CREATING AMI]')
ami_id = get_ami_id_by_name(notebook_config['expected_image_name'])
if ami_id == '':
print("Looks like it's first time we configure notebook server. Creating image.")
image_id = create_image_from_instance(tag_name=notebook_config['tag_name'],
instance_name=notebook_config['instance_name'],
image_name=notebook_config['expected_image_name'])
if image_id != '':
print("Image was successfully created. It's ID is {}".format(image_id))
except Exception as err:
append_result("Failed creating image.", str(err))
remove_ec2(notebook_config['tag_name'], notebook_config['instance_name'])
sys.exit(1)
# generating output information
ip_address = get_instance_ip_address(notebook_config['tag_name'], notebook_config['instance_name']).get('Private')
dns_name = get_instance_hostname(notebook_config['tag_name'], notebook_config['instance_name'])
tensorboard_url = "http://" + ip_address + ":6006/"
jupyter_ip_url = "http://" + ip_address + ":8888/{}/".format(notebook_config['exploratory_name'])
jupyter_notebook_acces_url = "http://" + edge_instance_ip + "/{}/".format(notebook_config['exploratory_name'])
tensorboard_acces_url = "http://" + edge_instance_ip + "/{}-tensor/".format(notebook_config['exploratory_name'])
jupyter_ungit_acces_url = "http://" + edge_instance_ip + "/{}-ungit/".format(notebook_config['exploratory_name'])
ungit_ip_url = "http://" + ip_address + ":8085/{}-ungit/".format(notebook_config['exploratory_name'])
print('[SUMMARY]')
logging.info('[SUMMARY]')
print("Instance name: {}".format(notebook_config['instance_name']))
print("Private DNS: {}".format(dns_name))
print("Private IP: {}".format(ip_address))
print("Instance ID: {}".format(get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name'])))
print("Instance type: {}".format(notebook_config['instance_type']))
print("Key name: {}".format(notebook_config['key_name']))
print("User key name: {}".format(notebook_config['user_keyname']))
print("AMI name: {}".format(notebook_config['notebook_image_name']))
print("Profile name: {}".format(notebook_config['role_profile_name']))
print("SG name: {}".format(notebook_config['security_group_name']))
print("TensorBoard URL: {}".format(tensorboard_url))
print("TensorBoard log dir: /var/log/tensorboard")
print("Jupyter URL: {}".format(jupyter_ip_url))
print("Ungit URL: {}".format(ungit_ip_url))
print('SSH access (from Edge node, via IP address): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], ip_address))
print('SSH access (from Edge node, via FQDN): ssh -i {0}.pem {1}@{2}'.format(notebook_config['key_name'], notebook_config['dlab_ssh_user'], dns_name))
with open("/root/result.json", 'w') as result:
res = {"hostname": dns_name,
"ip": ip_address,
"instance_id": get_instance_by_name(notebook_config['tag_name'], notebook_config['instance_name']),
"master_keyname": os.environ['conf_key_name'],
"tensorboard_log_dir": "/var/log/tensorboard",
"notebook_name": notebook_config['instance_name'],
"notebook_image_name": notebook_config['notebook_image_name'],
"Action": "Create new notebook server",
"exploratory_url": [
{"description": "Jupyter",
"url": jupyter_notebook_acces_url},
{"description": "TensorBoard",
"url": tensorboard_acces_url},
{"description": "Ungit",
"url": jupyter_ungit_acces_url},
{"description": "Jupyter (via tunnel)",
"url": jupyter_ip_url},
{"description": "TensorBoard (via tunnel)",
"url": tensorboard_url},
{"description": "Ungit (via tunnel)",
"url": ungit_ip_url}
]}
result.write(json.dumps(res)) | {
"content_hash": "e8f9fed8895da855db9eba99d3aaed60",
"timestamp": "",
"source": "github",
"line_count": 271,
"max_line_length": 167,
"avg_line_length": 53.53136531365314,
"alnum_prop": 0.5881988005790308,
"repo_name": "epam/DLab",
"id": "1e9fe8eb5fe0b6b01b45b34fda991329bceed3b3",
"size": "15276",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infrastructure-provisioning/src/general/scripts/aws/tensor_configure.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "81633"
},
{
"name": "HTML",
"bytes": "110323"
},
{
"name": "Java",
"bytes": "2473499"
},
{
"name": "Jupyter Notebook",
"bytes": "80955"
},
{
"name": "Python",
"bytes": "1861086"
},
{
"name": "R",
"bytes": "4894"
},
{
"name": "Ruby",
"bytes": "62731"
},
{
"name": "Shell",
"bytes": "18826"
},
{
"name": "TypeScript",
"bytes": "363308"
}
],
"symlink_target": ""
} |
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_prefix = '_'.join(['msg'] + os.path.basename(__file__).replace('.', '_').split('_')[1:5])
log_file_name = "{}_{}.log".format(log_prefix, sys.argv[1])
# Composite classes
class Dyna_QAgent(DynaMixin, QAgent):
def __init__(self, **kwargs):
super(Dyna_QAgent, self).__init__(**kwargs)
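# Dyna_QAgent composes DynaMixin with the tabular QAgent through cooperative
# multiple inheritance; the mixin is expected to consume its own keyword
# arguments (env_model and num_sim below) and forward the remaining QAgent
# parameters up the MRO via super().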
# Parameters
# |- Data
location = 'gym'
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.9, 0.9 # TD backup
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - No Phi
phi_length = 0
dim_state = (1, 1, 3)
range_state = ((((0, 10), (0, 10), (0, 10)),),)
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size, batch_size, update_period, freeze_period = 2, 200, 100, 4, 16
reward_scaling, reward_scaling_update, rs_period = 1, 'adaptive', 32 # reward scaling
# |- Env model
model_type, traffic_window_size = 'IPP', 50
stride, n_iter, adjust_offset = 2, 3, 1e-22
eval_period, eval_len = 4, 100
n_belief_bins, max_queue_len = 5, 20
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, -0.5
traffic_params = (model_type, traffic_window_size,
stride, n_iter, adjust_offset,
eval_period, eval_len,
n_belief_bins)
queue_params = (max_queue_len,)
beta = 0.5 # R = (1-beta)*ServiceReward + beta*Cost
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- DynaQ
num_sim = 5
# |- Env
# |- Time
start_time = pd.to_datetime("2014-10-15 09:40:00")
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df = pd.read_csv(
filepath_or_buffer='../data/trace_{}.dat'.format(location),
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
env_model = SJTUModel(traffic_params, queue_params, reward_params, 2)
agent = Dyna_QAgent(
env_model=env_model, num_sim=num_sim,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavyliftings
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| {
"content_hash": "a6233140b45f39e85473cc22b13b9dae",
"timestamp": "",
"source": "github",
"line_count": 138,
"max_line_length": 103,
"avg_line_length": 33.31159420289855,
"alnum_prop": 0.6497715901675005,
"repo_name": "zaxliu/deepnap",
"id": "efabd8516978796f715bed1b20adcd12deaf5f2b",
"size": "4623",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "experiments/kdd-exps/experiment_DynaQtable_130_Feb14_0029.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "571252"
},
{
"name": "Python",
"bytes": "520535"
}
],
"symlink_target": ""
} |
from basic import login, BasicCtrl
class LeaveCtrl(BasicCtrl):
@login
def get(self):
self.render('leave.html')
@login
def post(self):
self.del_current_sess()
self.redirect('/')
| {
"content_hash": "fce4e51861a8bb567e8b5fe36e693b19",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 34,
"avg_line_length": 19.90909090909091,
"alnum_prop": 0.6027397260273972,
"repo_name": "finron/luokr.com",
"id": "0dcf8545a16ebfa007f60f12a672d8403c68fb0d",
"size": "234",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "www.luokr.com/app/ctrls/leave.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1070"
},
{
"name": "CSS",
"bytes": "116766"
},
{
"name": "HTML",
"bytes": "121373"
},
{
"name": "JavaScript",
"bytes": "323250"
},
{
"name": "Nginx",
"bytes": "1837"
},
{
"name": "Python",
"bytes": "1266848"
}
],
"symlink_target": ""
} |
"""A set of utilities to load score files with different formats.
"""
import numpy
import tarfile
import os
def open_file(filename, mode='rt'):
"""open_file(filename) -> file_like
Opens the given score file for reading.
Score files might be raw text files, or a tar-file including a single score file inside.
**Parameters:**
``filename`` : str or file-like
The name of the score file to open, or a file-like object open for reading.
If a file name is given, the according file might be a raw text file or a (compressed) tar file containing a raw text file.
**Returns:**
``file_like`` : file-like
A read-only file-like object as it would be returned by open().
"""
if not isinstance(filename, str) and hasattr(filename, 'read'):
# It seems that this is an open file
return filename
if not os.path.isfile(filename):
raise IOError("Score file '%s' does not exist." % filename)
if not tarfile.is_tarfile(filename):
return open(filename, mode)
# open the tar file for reading
tar = tarfile.open(filename, 'r')
# get the first file in the tar file
tar_info = tar.next()
while tar_info is not None and not tar_info.isfile():
tar_info = tar.next()
# check that one file was found in the archive
if tar_info is None:
raise IOError("The given file is a .tar file, but it does not contain any file.")
# open the file for reading
return tar.extractfile(tar_info)
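# A minimal usage sketch (the file name is hypothetical): open_file() hides
# whether the scores live in a plain text file or inside a (compressed) tar
# archive, so callers can simply iterate over the returned file object.
#
#   for line in open_file('scores-dev.txt'):
#     ...  # each iteration yields one raw score-file line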
def four_column(filename):
"""four_column(filename) -> claimed_id, real_id, test_label, score
Loads a score set from a single file and yield its lines (to avoid loading the score file at once into memory).
This function verifies that all fields are correctly placed and contain valid fields.
The score file must contain the following information in each line:
claimed_id real_id test_label score
  **Parameters:**
``filename`` : str or file-like
The file object that will be opened with :py:func:`open_file` containing the scores.
**Yields:**
``claimed_id`` : str
The claimed identity -- the client name of the model that was used in the comparison
``real_id`` : str
The real identity -- the client name of the probe that was used in the comparison
``test_label`` : str
A label of the probe -- usually the probe file name, or the probe id
``score`` : float
The result of the comparison of the model and the probe
"""
for i, l in enumerate(open_file(filename)):
if isinstance(l, bytes): l = l.decode('utf-8')
s = l.strip()
if len(s) == 0 or s[0] == '#': continue #empty or comment
field = [k.strip() for k in s.split()]
if len(field) < 4:
raise SyntaxError('Line %d of file "%s" is invalid: %s' % (i, filename, l))
try:
score = float(field[3])
except:
raise SyntaxError('Cannot convert score to float at line %d of file "%s": %s' % (i, filename, l))
yield (field[0], field[1], field[2], score)
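# Example (hypothetical file content and names): a line such as
#
#   client1 client3 probe_017.png -1.2345
#
# is yielded by four_column() as ('client1', 'client3', 'probe_017.png', -1.2345),
# i.e. an impostor (negative) trial because claimed_id differs from real_id.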
def split_four_column(filename):
"""split_four_column(filename) -> negatives, positives
Loads a score set from a single file and splits the scores
between negatives and positives. The score file has to respect the 4 column
format as defined in the method :py:func:`four_column`.
This method avoids loading and allocating memory for the strings present in
the file. We only keep the scores.
**Parameters:**
``filename`` : str or file-like
The file that will be opened with :py:func:`open_file` containing the scores.
**Returns:**
``negatives`` : array_like(1D, float)
The list of ``score``'s, for which the ``claimed_id`` and the ``real_id`` differed (see :py:func:`four_column`).
``positives`` : array_like(1D, float)
The list of ``score``'s, for which the ``claimed_id`` and the ``real_id`` are identical (see :py:func:`four_column`).
"""
score_lines = load_score(filename, 4)
return get_negatives_positives(score_lines)
def cmc_four_column(filename):
"""cmc_four_column(filename) -> cmc_scores
Loads scores to compute CMC curves from a file in four column format.
The four column file needs to be in the same format as described in :py:func:`four_column`,
and the ``test_label`` (column 3) has to contain the test/probe file name or a probe id.
This function returns a list of tuples.
For each probe file, the tuple consists of a list of negative scores and a list of positive scores.
Usually, the list of positive scores should contain only one element, but more are allowed.
The result of this function can directly be passed to, e.g., the :py:func:`bob.measure.cmc` function.
**Parameters:**
``filename`` : str or file-like
The file that will be opened with :py:func:`open_file` containing the scores.
**Returns:**
``cmc_scores`` : [(array_like(1D, float), array_like(1D, float))]
A list of tuples, where each tuple contains the ``negative`` and ``positive`` scores for one probe of the database
"""
# extract positives and negatives
pos_dict = {}
neg_dict = {}
# read four column list
for (client_id, probe_id, probe_name, score_str) in four_column(filename):
try:
score = float(score_str)
# check in which dict we have to put the score
if client_id == probe_id:
correct_dict = pos_dict
else:
correct_dict = neg_dict
# append score
if probe_name in correct_dict:
correct_dict[probe_name].append(score)
else:
correct_dict[probe_name] = [score]
except:
raise SyntaxError("Cannot convert score '%s' to float" % score_str)
# convert to lists of tuples of ndarrays
retval = []
import logging
logger = logging.getLogger('bob')
for probe_name in sorted(pos_dict.keys()):
if probe_name in neg_dict:
retval.append((numpy.array(neg_dict[probe_name], numpy.float64), numpy.array(pos_dict[probe_name], numpy.float64)))
else:
logger.warn('For probe name "%s" there are only positive scores. This probe name is ignored.' % probe_name)
#test if there are probes for which only negatives exist
for probe_name in sorted(neg_dict.keys()):
if not probe_name in pos_dict.keys():
logger.warn('For probe name "%s" there are only negative scores. This probe name is ignored.' % probe_name)
return retval
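# Sketch of the structure returned above (all numbers are made up): for a file
# covering two probes the result looks like
#
#   [(array([-0.3, -1.1]), array([2.4])),   # probe A: negatives, positives
#    (array([-0.9]), array([1.7]))]         # probe B: negatives, positives
#
# which is exactly the input format expected by bob.measure.cmc().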
def five_column(filename):
"""five_column(filename) -> claimed_id, model_label, real_id, test_label, score
Loads a score set from a single file and yield its lines (to avoid loading the score file at once into memory).
This function verifies that all fields are correctly placed and contain valid fields.
The score file must contain the following information in each line:
claimed_id model_label real_id test_label score
  **Parameters:**
``filename`` : str or file-like
The file object that will be opened with :py:func:`open_file` containing the scores.
**Yields:**
``claimed_id`` : str
The claimed identity -- the client name of the model that was used in the comparison
``model_label`` : str
A label for the model -- usually the model file name, or the model id
``real_id`` : str
The real identity -- the client name of the probe that was used in the comparison
``test_label`` : str
A label of the probe -- usually the probe file name, or the probe id
``score`` : float
The result of the comparison of the model and the probe.
"""
for i, l in enumerate(open_file(filename)):
if isinstance(l, bytes): l = l.decode('utf-8')
s = l.strip()
if len(s) == 0 or s[0] == '#': continue #empty or comment
field = [k.strip() for k in s.split()]
if len(field) < 5:
raise SyntaxError('Line %d of file "%s" is invalid: %s' % (i, filename, l))
try:
score = float(field[4])
except:
raise SyntaxError('Cannot convert score to float at line %d of file "%s": %s' % (i, filename, l))
yield (field[0], field[1], field[2], field[3], score)
def split_five_column(filename):
"""split_five_column(filename) -> negatives, positives
Loads a score set from a single file in five column format and splits the scores
  between negatives and positives. The score file has to respect the 5 column
format as defined in the method :py:func:`five_column`.
This method avoids loading and allocating memory for the strings present in
the file. We only keep the scores.
**Parameters:**
``filename`` : str or file-like
The file that will be opened with :py:func:`open_file` containing the scores.
**Returns:**
``negatives`` : array_like(1D, float)
The list of ``score``'s, for which the ``claimed_id`` and the ``real_id`` differed (see :py:func:`five_column`).
``positives`` : array_like(1D, float)
The list of ``score``'s, for which the ``claimed_id`` and the ``real_id`` are identical (see :py:func:`five_column`).
"""
score_lines = load_score(filename, 5)
return get_negatives_positives(score_lines)
def cmc_five_column(filename):
"""cmc_four_column(filename) -> cmc_scores
Loads scores to compute CMC curves from a file in five column format.
  The five column file needs to be in the same format as described in :py:func:`five_column`,
and the ``test_label`` (column 4) has to contain the test/probe file name or a probe id.
This function returns a list of tuples.
For each probe file, the tuple consists of a list of negative scores and a list of positive scores.
Usually, the list of positive scores should contain only one element, but more are allowed.
The result of this function can directly be passed to, e.g., the :py:func:`bob.measure.cmc` function.
**Parameters:**
``filename`` : str or file-like
The file that will be opened with :py:func:`open_file` containing the scores.
**Returns:**
``cmc_scores`` : [(array_like(1D, float), array_like(1D, float))]
A list of tuples, where each tuple contains the ``negative`` and ``positive`` scores for one probe of the database
"""
# extract positives and negatives
pos_dict = {}
neg_dict = {}
  # read five column list
for (client_id, _, probe_id, probe_name, score) in five_column(filename):
# check in which dict we have to put the score
if client_id == probe_id:
correct_dict = pos_dict
else:
correct_dict = neg_dict
# append score
if probe_name in correct_dict:
correct_dict[probe_name].append(score)
else:
correct_dict[probe_name] = [score]
# convert to lists of tuples of ndarrays
retval = []
import logging
logger = logging.getLogger('bob')
for probe_name in sorted(pos_dict.keys()):
if probe_name in neg_dict:
retval.append((numpy.array(neg_dict[probe_name], numpy.float64), numpy.array(pos_dict[probe_name], numpy.float64)))
else:
logger.warn('For probe name "%s" there are only positive scores. This probe name is ignored.' % probe_name)
# test if there are probes for which only negatives exist
for probe_name in sorted(neg_dict.keys()):
if not probe_name in pos_dict.keys():
logger.warn('For probe name "%s" there are only negative scores. This probe name is ignored.' % probe_name)
return retval
def load_score(filename, ncolumns=None):
"""Load scores using numpy.loadtxt and return the data as a numpy array.
**Parameters:**
``filename`` : str or file-like
A path or file-like object that will be read with :py:func:`numpy.loadtxt`
containing the scores.
``ncolumns`` : 4 or 5 [default is 4]
Specifies the number of columns in the score file.
**Returns:**
``score_lines`` : numpy array
An array which contains not only the actual scores but also the
'claimed_id', 'real_id', 'test_label', and ['model_label']
"""
if ncolumns is None:
ncolumns = 4
def convertfunc(x):
return x
if ncolumns == 4:
names = ('claimed_id', 'real_id', 'test_label', 'score')
converters = {
0: convertfunc,
1: convertfunc,
2: convertfunc,
3: float}
elif ncolumns == 5:
names = ('claimed_id', 'model_label', 'real_id', 'test_label', 'score')
converters = {
0: convertfunc,
1: convertfunc,
2: convertfunc,
3: convertfunc,
4: float}
else:
raise ValueError("ncolumns of 4 and 5 are supported only.")
score_lines = numpy.genfromtxt(
open_file(filename, mode='rb'), dtype=None, names=names,
converters=converters, invalid_raise=True)
new_dtype = []
for name in score_lines.dtype.names[:-1]:
new_dtype.append((name, str(score_lines.dtype[name]).replace('S', 'U')))
new_dtype.append(('score', float))
score_lines = numpy.array(score_lines, new_dtype)
return score_lines
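# A short usage sketch (the file name is hypothetical):
#
#   score_lines = load_score('scores-dev.txt', ncolumns=4)
#   negatives, positives = get_negatives_positives(score_lines)
#
# Since load_score() returns a structured numpy array, columns can also be
# accessed by name, e.g. score_lines['claimed_id'] or score_lines['score'].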
def get_negatives_positives(score_lines):
"""Take the output of load_score and return negatives and positives.
This function aims to replace split_four_column and split_five_column
but takes a different input. It's up to you to use which one.
"""
pos_mask = score_lines['claimed_id'] == score_lines['real_id']
positives = score_lines['score'][pos_mask]
negatives = score_lines['score'][numpy.logical_not(pos_mask)]
return (negatives, positives)
def get_negatives_positives_all(score_lines_list):
"""Take a list of outputs of load_score and return stacked negatives and
positives."""
negatives, positives = [], []
for score_lines in score_lines_list:
neg_pos = get_negatives_positives(score_lines)
negatives.append(neg_pos[0])
positives.append(neg_pos[1])
negatives = numpy.vstack(negatives).T
positives = numpy.vstack(positives).T
return (negatives, positives)
def get_all_scores(score_lines_list):
"""Take a list of outputs of load_score and return stacked scores"""
return numpy.vstack([score_lines['score']
for score_lines in score_lines_list]).T
def dump_score(filename, score_lines):
"""Dump scores that were loaded using :py:func:`load_score`
The number of columns is automatically detected.
"""
if len(score_lines.dtype) == 5:
fmt = '%s %s %s %s %.9f'
elif len(score_lines.dtype) == 4:
fmt = '%s %s %s %.9f'
else:
raise ValueError("Only scores with 4 and 5 columns are supported.")
numpy.savetxt(filename, score_lines, fmt=fmt)
| {
"content_hash": "41e18b6a99a5ad36009f319895a2fdbc",
"timestamp": "",
"source": "github",
"line_count": 407,
"max_line_length": 127,
"avg_line_length": 34.79115479115479,
"alnum_prop": 0.6763418079096045,
"repo_name": "tiagofrepereira2012/bob.measure",
"id": "63246db0e4048bf1e13cfd624113bd3f2de665c1",
"size": "14284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "bob/measure/load.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C++",
"bytes": "77343"
},
{
"name": "Mask",
"bytes": "2056"
},
{
"name": "Python",
"bytes": "111280"
}
],
"symlink_target": ""
} |
import datetime
from django import forms
from django.conf import settings
from django.utils.translation import ugettext_lazy as _
from django.contrib.contenttypes.models import ContentType
from django.utils.encoding import force_unicode
from django_comments.forms import CommentForm
from .models import ThreadedComment
COMMENT_MAX_LENGTH = getattr(settings,'COMMENT_MAX_LENGTH', 3000)
class ThreadedCommentForm(CommentForm):
parent = forms.IntegerField(required=False, widget=forms.HiddenInput)
comment = forms.CharField(
label=_('Comment'),
widget=forms.Textarea(attrs={'class':'form-control',}),
max_length=COMMENT_MAX_LENGTH
)
def __init__(self, target_object, parent=None, data=None, initial=None):
self.parent = parent
if initial is None:
initial = {}
initial.update({'parent': self.parent})
super(ThreadedCommentForm, self).__init__(target_object, data=data, initial=initial)
def get_comment_model(self):
return ThreadedComment
def get_comment_create_data(self):
# Use the data of the superclass, and remove extra fields
return dict(
content_type = ContentType.objects.get_for_model(self.target_object),
object_pk = force_unicode(self.target_object._get_pk_val()),
comment = self.cleaned_data["comment"],
submit_date = datetime.datetime.now(),
site_id = settings.SITE_ID,
is_public = True,
is_removed = False,
parent_id = self.cleaned_data['parent']
)
ThreadedCommentForm.base_fields.pop('name')
ThreadedCommentForm.base_fields.pop('email')
ThreadedCommentForm.base_fields.pop('url')
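# A minimal usage sketch (the model instance and request data are hypothetical):
#
#     form = ThreadedCommentForm(target_object=blog_post, parent=parent_id,
#                                data=request.POST)
#     if form.is_valid():
#         comment = form.get_comment_object()
#
# is_valid() and get_comment_object() come from the django_comments
# CommentForm base class; the 'parent' field and the removal of the
# name/email/url fields are what this module adds.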
| {
"content_hash": "18aa22e12a286bd0f1123ddbd8fabe65",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 92,
"avg_line_length": 35.46938775510204,
"alnum_prop": 0.6708860759493671,
"repo_name": "underlost/GamerNews",
"id": "208cd1cd2593455cf72553f174d2df88e4aeb223",
"size": "1738",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "gamernews/apps/threadedcomments/forms.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "226951"
},
{
"name": "JavaScript",
"bytes": "135586"
},
{
"name": "Python",
"bytes": "124181"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import models, migrations
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('storage', '0005_auto_20170127_1412'),
]
operations = [
migrations.AlterField(
model_name='scalefile',
name='file_type',
field=models.CharField(default='SOURCE', max_length=50, db_index=True, choices=[('SOURCE', 'SOURCE'), ('PRODUCT', 'PRODUCT')]),
preserve_default=True,
),
migrations.AlterField(
model_name='scalefile',
name='job',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, blank=True, to='job.Job', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='scalefile',
name='job_exe',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, blank=True, to='job.JobExecution', null=True),
preserve_default=True,
),
migrations.AlterField(
model_name='scalefile',
name='job_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, blank=True, to='job.JobType', null=True),
preserve_default=True,
),
]
| {
"content_hash": "3ef197de34e76cda31ad96fdc63461ca",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 139,
"avg_line_length": 35.026315789473685,
"alnum_prop": 0.5957926371149511,
"repo_name": "ngageoint/scale",
"id": "df0df74ef8f9169a5b38f4ba144e1bf129ccaf5e",
"size": "1355",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "scale/storage/migrations/0006_auto_20170127_1423.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "7219"
},
{
"name": "CSS",
"bytes": "12193"
},
{
"name": "Dockerfile",
"bytes": "14853"
},
{
"name": "HCL",
"bytes": "301"
},
{
"name": "HTML",
"bytes": "48818"
},
{
"name": "JavaScript",
"bytes": "503"
},
{
"name": "Makefile",
"bytes": "5852"
},
{
"name": "Python",
"bytes": "5295677"
},
{
"name": "Shell",
"bytes": "26650"
}
],
"symlink_target": ""
} |
"""Extensions module. Each extension is initialized in the app factory located
in app.py
"""
from flask_bcrypt import Bcrypt
bcrypt = Bcrypt()
from flask_login import LoginManager
login_manager = LoginManager()
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from flask_migrate import Migrate
migrate = Migrate()
from flask_cache import Cache
cache = Cache()
from flask_debugtoolbar import DebugToolbarExtension
debug_toolbar = DebugToolbarExtension()
from flask_mail import Mail
mail = Mail()
from flask_pymongo import PyMongo
mongo = PyMongo() | {
"content_hash": "6abdb0af658d2de6b1d10f1bb641629b",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 78,
"avg_line_length": 20.85185185185185,
"alnum_prop": 0.7886323268206039,
"repo_name": "binking/News_website",
"id": "9b53e9882ff725c6f3b343e608f71d7ea0d13e0b",
"size": "587",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "news_website/extensions.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "1170"
},
{
"name": "HTML",
"bytes": "46071"
},
{
"name": "JavaScript",
"bytes": "306251"
},
{
"name": "Python",
"bytes": "85517"
}
],
"symlink_target": ""
} |
import _plotly_utils.basevalidators
class LenValidator(_plotly_utils.basevalidators.NumberValidator):
def __init__(self, plotly_name="len", parent_name="bar.marker.colorbar", **kwargs):
super(LenValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
min=kwargs.pop("min", 0),
role=kwargs.pop("role", "style"),
**kwargs
)
| {
"content_hash": "6b68be29d24abd3c0135bbeb346f0b45",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 87,
"avg_line_length": 37.30769230769231,
"alnum_prop": 0.5938144329896907,
"repo_name": "plotly/python-api",
"id": "8528a13ae3b050d24746333d8d717d4c2742279c",
"size": "485",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/bar/marker/colorbar/_len.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
} |
import logging
from archinfo.arch_soot import (ArchSoot, SootAddressDescriptor,
SootAddressTerminator, SootArgument,
SootMethodDescriptor)
from ... import sim_options as o
from ...errors import SimEngineError, SimTranslationError
from cle import CLEError
from ...state_plugins.inspect import BP_AFTER, BP_BEFORE
from ...sim_type import SimTypeNum, SimTypeFunction, parse_type
from ..engine import SuccessorsMixin
from ..procedure import ProcedureMixin
from .exceptions import BlockTerminationNotice, IncorrectLocationException
from .statements import (SimSootStmt_Return, SimSootStmt_ReturnVoid,
translate_stmt)
from .values import SimSootValue_Local, SimSootValue_ParamRef
l = logging.getLogger('angr.engines.soot.engine')
# pylint: disable=arguments-differ
class SootMixin(SuccessorsMixin, ProcedureMixin):
"""
Execution engine based on Soot.
"""
def lift_soot(self, addr=None, the_binary=None, **kwargs): # pylint: disable=unused-argument, no-self-use
assert isinstance(addr, SootAddressDescriptor)
method, stmt_idx = addr.method, addr.stmt_idx
try:
method = the_binary.get_soot_method(method, params=method.params)
except CLEError as ex:
raise SimTranslationError("CLE error: {}".format(ex))
if stmt_idx is None:
return method.blocks[0] if method.blocks else None
else:
#try:
# _, block = method.block_by_label.floor_item(stmt_idx)
#except KeyError:
# return None
#return block
# TODO: Re-enable the above code once bintrees are used
# FIXME: stmt_idx does not index from the start of the method but from the start
# of the block therefore it always returns the block with label 0 independently
# of where we are
# block = method.block_by_label.get(stmt_idx, None)
# if block is not None:
# return block
# Slow path
for block_idx, block in enumerate(method.blocks):
# if block.label <= stmt_idx < block.label + len(block.statements):
if block_idx == addr.block_idx:
return block
return None
def process_successors(self, successors, **kwargs):
state = self.state
if not isinstance(state._ip, SootAddressDescriptor):
return super().process_successors(successors, **kwargs)
addr = state._ip
if isinstance(addr, SootAddressTerminator):
successors.processed = True
return
if self.project.use_sim_procedures:
procedure = self._get_sim_procedure(addr)
if procedure is not None:
self.process_procedure(state, successors, procedure)
return
binary = state.regs._ip_binary
method = binary.get_soot_method(addr.method, none_if_missing=True)
# TODO make the skipping of code in "android.*" classes optional
if addr.method.class_name.startswith('android.') or not method:
# This means we are executing code that is not in CLE, typically library code.
# We may want soot -> pysoot -> cle to export at least the method names of the libraries
# (soot has a way to deal with this), as of now we just "simulate" a return.
# Note: If we have a sim procedure, we should not reach this point.
l.warning("Try to execute non-loaded code %s. Execute unconstrained SimProcedure.", addr)
# STEP 1: Get unconstrained SimProcedure
procedure = self.get_unconstrained_simprocedure()
# STEP 2: Pass Method descriptor as Parameter
# check if there are already params in the stack
param_idx = 0
param_ref = state.javavm_memory.load(SimSootValue_ParamRef(param_idx, None), none_if_missing=True)
while param_ref is not None:
param_idx += 1
param_ref = state.javavm_memory.load(SimSootValue_ParamRef(param_idx, None), none_if_missing=True)
# store all function arguments in memory, starting from the last param index
state.memory.store(SimSootValue_ParamRef(param_idx, None), addr.method)
            # STEP 3: Execute unconstrained procedure
self.process_procedure(state, successors, procedure)
# self._add_return_exit(state, successors)
return
block = method.blocks[addr.block_idx]
starting_stmt_idx = addr.stmt_idx
if starting_stmt_idx == 0:
l.debug("Executing new block %s \n\n%s\n", addr, block)
else:
# l.debug("Continue executing block %s", addr)
l.debug("Continue executing block %s \n\n%s\n", addr, block)
self._handle_soot_block(state, successors, block, starting_stmt_idx, method)
successors.processed = True
def _handle_soot_block(self, state, successors, block, starting_stmt_idx, method=None):
stmt = stmt_idx = None
for tindex, stmt in enumerate(block.statements[starting_stmt_idx:]):
stmt_idx = starting_stmt_idx + tindex
state._inspect('statement', BP_BEFORE, statement=stmt_idx)
terminate = self._handle_soot_stmt(state, successors, stmt_idx, stmt)
state._inspect('statement', BP_AFTER)
if terminate:
break
else:
if stmt is None:
l.warning("Executed empty bb, maybe pc is broken")
return
if method is not None:
next_addr = self._get_next_linear_instruction(state, stmt_idx)
l.debug("Advancing execution linearly to %s", next_addr)
if next_addr is not None:
successors.add_successor(state.copy(), next_addr, state.solver.true, 'Ijk_Boring')
def _handle_soot_stmt(self, state, successors, stmt_idx, stmt):
# execute statement
try:
l.debug("Executing statement: %s", stmt)
s_stmt = translate_stmt(stmt, state)
except SimEngineError as e:
l.error("Skipping statement: %s", e)
return False
# add invoke exit
if s_stmt.has_invoke_target:
invoke_state = state.copy()
# parse invoke expression
invoke_expr = s_stmt.invoke_expr
method = invoke_expr.method
args = invoke_expr.args
ret_var = invoke_expr.ret_var if hasattr(invoke_expr, 'ret_var') else None
# setup callsite
ret_addr = self._get_next_linear_instruction(state, stmt_idx)
if 'NATIVE' in method.attrs:
# the target of the call is a native function
# => we need to setup a native call-site
l.debug("Native invoke: %r", method)
addr = self.project.simos.get_addr_of_native_method(method)
if not addr:
# native function could not be found
# => skip invocation and continue execution linearly
return False
invoke_state = self._setup_native_callsite(invoke_state, addr, method, args, ret_addr, ret_var)
else:
l.debug("Invoke: %r", method)
self.setup_callsite(invoke_state, args, ret_addr, ret_var)
addr = SootAddressDescriptor(method, 0, 0)
# add invoke state as the successor and terminate execution
# prematurely, since Soot does not guarantee that an invoke stmt
# terminates a block
successors.add_successor(invoke_state, addr, state.solver.true, 'Ijk_Call')
return True
# add jmp exit
elif s_stmt.has_jump_targets:
for target, condition in s_stmt.jmp_targets_with_conditions:
if not target:
target = self._get_next_linear_instruction(state, stmt_idx)
l.debug("Possible jump: %s -> %s", state._ip, target)
successors.add_successor(state.copy(), target, condition, 'Ijk_Boring')
return True
# add return exit
elif isinstance(s_stmt, (SimSootStmt_Return, SimSootStmt_ReturnVoid)):
l.debug("Return exit")
self._add_return_exit(state, successors, s_stmt.return_value)
return True
# go on linearly
else:
return False
@classmethod
def _add_return_exit(cls, state, successors, return_val=None):
ret_state = state.copy()
cls.prepare_return_state(ret_state, return_val)
successors.add_successor(ret_state, state.callstack.ret_addr, ret_state.solver.true, 'Ijk_Ret')
successors.processed = True
def _get_sim_procedure(self, addr):
# Delayed import
from ...procedures import SIM_PROCEDURES
if addr in self.project._sim_procedures:
return self.project._sim_procedures[addr]
method = addr.method
class_name = method.class_name
method_prototype = "%s(%s)" % (method.name, ",".join(method.params))
if class_name in SIM_PROCEDURES and \
method_prototype in SIM_PROCEDURES[class_name]:
procedure_cls = SIM_PROCEDURES[class_name][method_prototype]
else:
return None
# Lazy-initialize it
proc = procedure_cls(project=self.project)
self.project._sim_procedures[addr] = proc
return proc
def get_unconstrained_simprocedure(self):
# Delayed import
from ...procedures import SIM_PROCEDURES
# TODO: fix method prototype
procedure_cls = SIM_PROCEDURES["angr.unconstrained"]["unconstrained()"]
# Lazy-initialize it
proc = procedure_cls(project=self.project)
return proc
@staticmethod
def _is_method_beginning(addr):
return addr.block_idx == 0 and addr.stmt_idx == 0
@staticmethod
def _get_next_linear_instruction(state, stmt_idx):
addr = state.addr.copy()
addr.stmt_idx = stmt_idx
method = state.regs._ip_binary.get_soot_method(addr.method)
current_bb = method.blocks[addr.block_idx]
new_stmt_idx = addr.stmt_idx + 1
if new_stmt_idx < len(current_bb.statements):
return SootAddressDescriptor(addr.method, addr.block_idx, new_stmt_idx)
else:
new_bb_idx = addr.block_idx + 1
if new_bb_idx < len(method.blocks):
return SootAddressDescriptor(addr.method, new_bb_idx, 0)
else:
l.warning("falling into a non existing bb: %d in %s",
new_bb_idx, SootMethodDescriptor.from_soot_method(method))
raise IncorrectLocationException()
@classmethod
def setup_callsite(cls, state, args, ret_addr, ret_var=None):
# push new callstack frame
state.callstack.push(state.callstack.copy())
state.callstack.ret_addr = ret_addr
state.callstack.invoke_return_variable = ret_var
# push new memory stack frame
state.javavm_memory.push_stack_frame()
# setup arguments
if args:
cls.setup_arguments(state, list(args))
@staticmethod
def setup_arguments(state, args):
# if available, store the 'this' reference
if len(args) > 0 and args[0].is_this_ref:
this_ref = args.pop(0)
local = SimSootValue_Local('this', this_ref.type)
state.javavm_memory.store(local, this_ref.value)
# store all function arguments in memory
for idx, arg in enumerate(args):
param_ref = SimSootValue_ParamRef(idx, arg.type)
state.javavm_memory.store(param_ref, arg.value)
@staticmethod
def prepare_return_state(state, ret_value=None):
# pop callstack
ret_var = state.callstack.invoke_return_variable
procedure_data = state.callstack.procedure_data
state.callstack.pop()
# pass procedure data to the current callstack (if available)
# => this should get removed by the corresponding sim procedure
state.callstack.procedure_data = procedure_data
# pop memory frame
state.memory.pop_stack_frame()
# save return value
if ret_value is not None:
l.debug("Assign %r := %r", ret_var, ret_value)
if ret_var is not None:
# usually the return value is read from the previous stack frame
state.memory.store(ret_var, ret_value)
else:
# however if we call a method from outside (e.g. project.state_call),
# no previous stack frame exist and the return variable is not set
# => for this cases, we store the value in the registers, so it can
# still be accessed
state.regs.invoke_return_value = ret_value
@staticmethod
def terminate_execution(statement, state, successors):
l.debug("Returning with an empty stack: ending execution")
# this code is coming by sim_procedure.exit()
state.options.discard(o.AST_DEPS)
state.options.discard(o.AUTO_REFS)
exit_code = 0
if type(statement) is SimSootStmt_Return:
exit_code = statement.return_value
# TODO symbolic exit code?
exit_code = state.solver.BVV(exit_code, state.arch.bits)
state.history.add_event('terminate', exit_code=exit_code)
successors.add_successor(state, state.regs.ip, state.solver.true, 'Ijk_Exit')
successors.processed = True
raise BlockTerminationNotice()
#
# JNI Native Interface
#
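    # Flow of a native (JNI) call as implemented below: the Java-level
    # arguments get prepended with the JNIEnv pointer and a 'this'/class
    # reference, a C prototype is derived from the Java method signature and
    # execution continues at the native address; once the native code returns,
    # prepare_native_return_state() converts the raw return value back into a
    # Java value and hands control back to the Soot engine.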
@staticmethod
def prepare_native_return_state(native_state):
"""
Hook target for native function call returns.
Recovers and stores the return value from native memory and toggles the
state, s.t. execution continues in the Soot engine.
"""
javavm_simos = native_state.project.simos
ret_state = native_state.copy()
# set successor flags
ret_state.regs._ip = ret_state.callstack.ret_addr
ret_state.scratch.guard = ret_state.solver.true
ret_state.history.jumpkind = 'Ijk_Ret'
# if available, lookup the return value in native memory
ret_var = ret_state.callstack.invoke_return_variable
if ret_var is not None:
# get return symbol from native state
native_cc = javavm_simos.get_native_cc()
ret_symbol = native_cc.return_val(
javavm_simos.get_native_type(ret_var.type)
).get_value(native_state).to_claripy()
# convert value to java type
if ret_var.type in ArchSoot.primitive_types:
# return value has a primitive type
# => we need to manually cast the return value to the correct size, as this
# would be usually done by the java callee
ret_value = javavm_simos.cast_primitive(ret_state, ret_symbol,
to_type=ret_var.type)
else:
# return value has a reference type
# => ret_symbol is a opaque ref
# => lookup corresponding java reference
ret_value = ret_state.jni_references.lookup(ret_symbol)
else:
ret_value = None
# teardown return state
SootMixin.prepare_return_state(ret_state, ret_value)
# finally, delete all local references
ret_state.jni_references.clear_local_references()
return [ret_state]
@classmethod
def _setup_native_callsite(cls, state, native_addr, java_method, args, ret_addr, ret_var):
# Step 1: setup java callsite, but w/o storing arguments in memory
cls.setup_callsite(state, None, ret_addr, ret_var)
# Step 2: add JNI specific arguments to *args list
# get JNI environment pointer
jni_env = SootArgument(state.project.simos.jni_env, "JNIEnv")
# get reference to the current object or class
if args and args[0].is_this_ref:
# instance method call
# => pass 'this' reference to native code
ref = args.pop(0)
else:
# static method call
# => pass 'class' reference to native code
class_ = state.javavm_classloader.get_class(java_method.class_name, init_class=True)
ref = SootArgument(class_, "Class")
# add to args
final_args = [jni_env, ref] + args
# Step 3: generate C prototype from java_method
voidp = parse_type('void*')
arg_types = [voidp, voidp] + [state.project.simos.get_native_type(ty) for ty in java_method.params]
ret_type = state.project.simos.get_native_type(java_method.ret)
prototype = SimTypeFunction(args=arg_types, returnty=ret_type)
        # Step 4: create native invoke state
return state.project.simos.state_call(native_addr, *final_args,
base_state=state,
prototype=prototype,
ret_type=java_method.ret)
| {
"content_hash": "6c84184f768568abfd387f1d8bf92b29",
"timestamp": "",
"source": "github",
"line_count": 414,
"max_line_length": 114,
"avg_line_length": 42.16183574879227,
"alnum_prop": 0.6010885133199656,
"repo_name": "angr/angr",
"id": "132bc13bbf38b468364dac5e0f38a75732b152aa",
"size": "17456",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "angr/engines/soot/engine.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "6694"
},
{
"name": "C++",
"bytes": "146292"
},
{
"name": "Makefile",
"bytes": "946"
},
{
"name": "Python",
"bytes": "27717304"
}
],
"symlink_target": ""
} |
import unittest
class TestSequenceFunctions(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_listcontroler_loads(self):
pass
| {
"content_hash": "7596fbb83d66eb779a7c4ff32a3bcd4b",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 39,
"avg_line_length": 16.2,
"alnum_prop": 0.5802469135802469,
"repo_name": "hepix-virtualisation/vmcaster",
"id": "41ae0ae4a24ad08b6806d92b2599567c9e56f623",
"size": "164",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "vmcasterpub/tests/test_outfacard.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "126935"
}
],
"symlink_target": ""
} |
import os.path
import logging
import json
import tempfile
import shutil
import time
from ceph_deploy import hosts
from ceph_deploy.cliutil import priority
from ceph_deploy.lib import remoto
import ceph_deploy.util.paths.mon
LOG = logging.getLogger(__name__)
def _keyring_equivalent(keyring_one, keyring_two):
"""
Check two keyrings are identical
"""
def keyring_extract_key(file_path):
"""
Cephx keyring files may or may not have white space before some lines.
They may have some values in quotes, so a safe way to compare is to
extract the key.
"""
        with open(file_path, 'r') as f:
for line in f:
content = line.strip()
if len(content) == 0:
continue
split_line = content.split('=')
if split_line[0].strip() == 'key':
return "=".join(split_line[1:]).strip()
raise RuntimeError("File '%s' is not a keyring" % file_path)
key_one = keyring_extract_key(keyring_one)
key_two = keyring_extract_key(keyring_two)
return key_one == key_two
def keytype_path_to(args, keytype):
"""
Get the local filename for a keyring type
"""
if keytype == "admin":
return '{cluster}.client.admin.keyring'.format(
cluster=args.cluster)
if keytype == "mon":
return '{cluster}.mon.keyring'.format(
cluster=args.cluster)
return '{cluster}.bootstrap-{what}.keyring'.format(
cluster=args.cluster,
what=keytype)
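# Example of the filenames produced above (illustrative comment only; assumes an
# ``args`` object whose ``cluster`` attribute holds the default name 'ceph'):
#   keytype_path_to(args, 'admin') -> 'ceph.client.admin.keyring'
#   keytype_path_to(args, 'mon')   -> 'ceph.mon.keyring'
#   keytype_path_to(args, 'osd')   -> 'ceph.bootstrap-osd.keyring'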
def keytype_identity(keytype):
"""
Get the keyring identity from keyring type.
This is used in authentication with keyrings and generating keyrings.
"""
ident_dict = {
'admin' : 'client.admin',
'mds' : 'client.bootstrap-mds',
'osd' : 'client.bootstrap-osd',
'rgw' : 'client.bootstrap-rgw',
'mon' : 'mon.'
}
return ident_dict.get(keytype, None)
def keytype_capabilities(keytype):
"""
Get the capabilities of a keyring from keyring type.
"""
cap_dict = {
'admin' : [
'osd', 'allow *',
'mds', 'allow *',
'mon', 'allow *'
],
'mds' : [
'mon', 'allow profile bootstrap-mds'
],
'osd' : [
'mon', 'allow profile bootstrap-osd'
],
'rgw': [
'mon', 'allow profile bootstrap-rgw'
]
}
return cap_dict.get(keytype, None)
def gatherkeys_missing(args, distro, rlogger, keypath, keytype, dest_dir):
"""
Get or create the keyring from the mon using the mon keyring by keytype and
copy to dest_dir
"""
arguments = [
'/usr/bin/ceph',
'--connect-timeout=25',
'--cluster={cluster}'.format(
cluster=args.cluster),
'--name', 'mon.',
'--keyring={keypath}'.format(
keypath=keypath),
'auth', 'get-or-create',
]
identity = keytype_identity(keytype)
if identity is None:
raise RuntimeError('Could not find identity for keytype:%s' % keytype)
arguments.append(identity)
    capabilities = keytype_capabilities(keytype)
    if capabilities is None:
        raise RuntimeError('Could not find capabilities for keytype:%s' % keytype)
    arguments.extend(capabilities)
out, err, code = remoto.process.check(
distro.conn,
arguments
)
if code != 0:
rlogger.error('"ceph auth get-or-create for keytype %s returned %s', keytype, code)
for line in err:
rlogger.debug(line)
return False
keyring_name_local = keytype_path_to(args, keytype)
keyring_path_local = os.path.join(dest_dir, keyring_name_local)
    with open(keyring_path_local, 'w') as f:
for line in out:
f.write(line + '\n')
return True
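# Illustration (not part of the original module): with the default cluster name
# 'ceph' and keytype 'osd', the remote command assembled above is roughly
#   /usr/bin/ceph --connect-timeout=25 --cluster=ceph --name mon. \
#       --keyring=<mon keyring path> auth get-or-create \
#       client.bootstrap-osd mon 'allow profile bootstrap-osd'
# and its stdout is written verbatim to the local 'ceph.bootstrap-osd.keyring'.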
def gatherkeys_with_mon(args, host, dest_dir):
"""
Connect to mon and gather keys if mon is in quorum.
"""
distro = hosts.get(host, username=args.username)
remote_hostname = distro.conn.remote_module.shortname()
dir_keytype_mon = ceph_deploy.util.paths.mon.path(args.cluster, remote_hostname)
path_keytype_mon = "%s/keyring" % (dir_keytype_mon)
mon_key = distro.conn.remote_module.get_file(path_keytype_mon)
if mon_key is None:
LOG.warning("No mon key found in host: %s", host)
return False
mon_name_local = keytype_path_to(args, "mon")
mon_path_local = os.path.join(dest_dir, mon_name_local)
    with open(mon_path_local, 'w') as f:
f.write(mon_key)
rlogger = logging.getLogger(host)
path_asok = ceph_deploy.util.paths.mon.asok(args.cluster, remote_hostname)
out, err, code = remoto.process.check(
distro.conn,
[
"/usr/bin/ceph",
"--connect-timeout=25",
"--cluster={cluster}".format(
cluster=args.cluster),
"--admin-daemon={asok}".format(
asok=path_asok),
"mon_status"
]
)
if code != 0:
rlogger.error('"ceph mon_status %s" returned %s', host, code)
for line in err:
rlogger.debug(line)
return False
try:
mon_status = json.loads("".join(out))
except ValueError:
rlogger.error('"ceph mon_status %s" output was not json', host)
for line in out:
rlogger.error(line)
return False
mon_number = None
mon_map = mon_status.get('monmap')
if mon_map is None:
rlogger.error("could not find mon map for mons on '%s'", host)
return False
mon_quorum = mon_status.get('quorum')
if mon_quorum is None:
rlogger.error("could not find quorum for mons on '%s'" , host)
return False
mon_map_mons = mon_map.get('mons')
if mon_map_mons is None:
rlogger.error("could not find mons in monmap on '%s'", host)
return False
for mon in mon_map_mons:
if mon.get('name') == remote_hostname:
mon_number = mon.get('rank')
break
if mon_number is None:
rlogger.error("could not find '%s' in monmap", remote_hostname)
return False
if not mon_number in mon_quorum:
rlogger.error("Not yet quorum for '%s'", host)
return False
for keytype in ["admin", "mds", "osd", "rgw"]:
if not gatherkeys_missing(args, distro, rlogger, path_keytype_mon, keytype, dest_dir):
# We will return failure if we fail to gather any key
rlogger.error("Failed to return '%s' key from host ", keytype, host)
return False
return True
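# Sketch of the mon_status JSON shape the function above relies on (inferred
# from the parsing logic, not an exhaustive schema):
#   {"quorum": [0, 1, 2],
#    "monmap": {"mons": [{"name": "mon-a", "rank": 0}, ...]}}
# The queried host's rank must appear in "quorum" before keys are gathered.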
def gatherkeys(args):
"""
Gather keys from any mon and store in current working directory.
Backs up keys from previous installs and stores new keys.
"""
oldmask = os.umask(077)
try:
try:
tmpd = tempfile.mkdtemp()
LOG.info("Storing keys in temp directory %s", tmpd)
            success = False
            for host in args.mon:
                success = gatherkeys_with_mon(args, host, tmpd)
                if success:
                    break
            if not success:
                LOG.error("Failed to connect to host: %s", ', '.join(args.mon))
                raise RuntimeError('Failed to connect to any mon')
had_error = False
date_string = time.strftime("%Y%m%d%H%M%S")
for keytype in ["admin", "mds", "mon", "osd", "rgw"]:
filename = keytype_path_to(args, keytype)
tmp_path = os.path.join(tmpd, filename)
if not os.path.exists(tmp_path):
LOG.error("No key retrived for '%s'" , keytype)
had_error = True
continue
if not os.path.exists(filename):
LOG.info("Storing %s" % (filename))
shutil.move(tmp_path, filename)
continue
if _keyring_equivalent(tmp_path, filename):
LOG.info("keyring '%s' already exists" , filename)
continue
backup_keyring = "%s-%s" % (filename, date_string)
LOG.info("Replacing '%s' and backing up old key as '%s'", filename, backup_keyring)
shutil.copy(filename, backup_keyring)
shutil.move(tmp_path, filename)
if had_error:
raise RuntimeError('Failed to get all key types')
finally:
LOG.info("Destroy temp directory %s" %(tmpd))
shutil.rmtree(tmpd)
finally:
os.umask(oldmask)
@priority(40)
def make(parser):
"""
Gather authentication keys for provisioning new nodes.
"""
parser.add_argument(
'mon',
metavar='HOST',
nargs='+',
help='monitor host to pull keys from',
)
parser.set_defaults(
func=gatherkeys,
)
| {
"content_hash": "70afc6cd539e83544ac5fb2190fe7b56",
"timestamp": "",
"source": "github",
"line_count": 268,
"max_line_length": 99,
"avg_line_length": 33.548507462686565,
"alnum_prop": 0.561450339228117,
"repo_name": "Vicente-Cheng/ceph-deploy",
"id": "a7d0981d902e29a551979001edb7f53e749cc8f4",
"size": "8991",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ceph_deploy/gatherkeys.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "355276"
},
{
"name": "Shell",
"bytes": "7086"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import os
import numpy as np
import pandas as pd
import marvin
from marvin.utils.yanny import yanny
# Stores the maskbits yanny file structure so that we don't need to open it more than once.
_maskbits_from_yanny = None
def _read_maskbit_schemas():
"""Read all available SDSS maskbit schemas from yanny file.
Returns:
Record Array: all bits for all schemas.
"""
global _maskbits_from_yanny
if _maskbits_from_yanny is None:
path_maskbits = os.path.join(os.path.dirname(marvin.__file__), 'data', 'sdssMaskbits.par')
_maskbits_from_yanny = yanny(path_maskbits, np=True)
return _maskbits_from_yanny['MASKBITS']
def get_available_maskbits():
"""Get names of available maskbit schemas from yanny file.
Returns:
list: Names of available maskbits.
"""
maskbits = _read_maskbit_schemas()
return sorted(set([it[0] for it in maskbits]))
def get_manga_target(flag_id, bitmasks, header):
"""Get MANGA_TARGET[``flag_id``] flag.
Parameters:
flag_id (str):
Flag ID number (e.g., "1" for MANGA_TARGET1).
bitmasks (dict):
`Maskbit` objects.
header (`astropy.io.fits.header.Header`):
File header.
Returns:
`Maskbit`
"""
flag_id = str(int(flag_id))
manga_target = bitmasks['MANGA_TARGET{}'.format(flag_id)]
try:
manga_target.mask = int(header['MNGTRG{}'.format(flag_id)])
except KeyError:
manga_target.mask = int(header['MNGTARG{}'.format(flag_id)])
return manga_target
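# Hypothetical usage sketch (variable names are assumptions, not from this file):
#   mngtarg1 = get_manga_target('1', bitmasks, header)
#   mngtarg1.labels  # labels of the bits set in header['MNGTRG1'] / ['MNGTARG1']
# i.e. the returned Maskbit carries the header's targeting value as its mask.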
class Maskbit(object):
"""A class representing a maskbit.
Parameters:
schema (DataFrame):
Maskbit schema.
name (str):
Name of maskbit.
description (str):
Description of maskbit.
"""
def __init__(self, name, schema=None, description=None):
self.name = name
self.schema = schema if schema is not None else self._load_schema(name)
self.description = description if description is not None else None
self.mask = None
def __repr__(self):
if (isinstance(self.mask, int) or self.mask is None):
labels = self.labels
else:
labels = 'shape={}'.format(self.mask.shape)
return '<Maskbit {0!r} {1}>'.format(self.name, labels)
def _load_schema(self, flag_name):
"""Load SDSS Maskbit schema from yanny file.
Parameters:
flag_name (str):
Name of flag.
Returns:
DataFrame: Schema of flag.
"""
maskbits = _read_maskbit_schemas()
flag = maskbits[maskbits['flag'] == flag_name]
return pd.DataFrame(flag[['bit', 'label', 'description']])
@property
def bits(self):
return self.values_to_bits() if self.mask is not None else None
@property
def labels(self):
return self.values_to_labels() if self.mask is not None else None
def values_to_bits(self, values=None):
"""Convert mask values to a list of bits set.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
Returns:
list:
Bits that are set.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.values_to_bits()
[[[0, 1, 4, 30],
[0, 1, 4, 30],
...
[0, 1, 4, 30]]]
"""
# assert (self.mask is not None) or (values is not None), 'Must provide values.'
# values = np.array(self.mask) if values is None else np.array(values)
# ndim = values.ndim
# assert ndim <= 3, '`value` must be int, 1-D array, 2-D array, or 3-D array.'
# # expand up to 2 dimensions
# while values.ndim < 3:
# values = np.array([values])
# # create list of list of lists of bits set
# bits_set = []
# for ii in range(values.shape[0]):
# row_ii = []
# for jj in range(values.shape[1]):
# row_jj = []
# for kk in range(values.shape[2]):
# row_jj.append(self._value_to_bits(values[ii, jj, kk], self.schema.bit.values))
# row_ii.append(row_jj)
# bits_set.append(row_ii)
# # condense back down to initial dimensions
# for __ in range(3 - ndim):
# bits_set = bits_set[0]
bits_set = self._get_a_set(values, convert_to='bits')
return bits_set
def _get_uniq_bits(self, values):
''' Return a dictionary of unique bits
Parameters:
values (list):
A flattened list of mask values
Returns:
dict:
A unique dictionary of {mask value: bit list} as {key: value}
'''
uniqvals = set(values)
vdict = {v: self._value_to_bits(v, self.schema.bit.values) for v in uniqvals}
return vdict
def _get_uniq_labels(self, values):
''' Return a dictionary of unique labels
Parameters:
values (list):
A flattened list of mask values
Returns:
dict:
A unique dictionary of {mask value: labels list} as {key: value}
'''
uniqbits = self._get_uniq_bits(values)
uniqlabels = {k: self.schema.label[self.schema.bit.isin(v)].values.tolist() for k, v in uniqbits.items()}
return uniqlabels
def _get_a_set(self, values, convert_to='bits'):
''' Convert mask values to a list of either bit or label sets.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
convert_to (str):
Indicates what to convert to. Either "bits" or "labels"
Returns:
list:
Bits/Labels that are set.
'''
assert (self.mask is not None) or (values is not None), 'Must provide values.'
values = np.array(self.mask) if values is None else np.array(values)
ndim = values.ndim
shape = values.shape
assert ndim <= 3, '`value` must be int, 1-D array, 2-D array, or 3-D array.'
flatmask = values.flatten()
if convert_to == 'bits':
uniqvals = self._get_uniq_bits(flatmask)
elif convert_to == 'labels':
uniqvals = self._get_uniq_labels(flatmask)
vallist = list(map(lambda x: uniqvals[x], flatmask))
if ndim > 0:
vals_set = np.reshape(vallist, shape).tolist()
else:
vals_set = vallist[0]
return vals_set
def _value_to_bits(self, value, bits_all):
"""Convert mask value to a list of bits.
Parameters:
value (int):
Mask value.
bits_all (array):
All bits for flag.
Returns:
list:
Bits that are set.
"""
return [it for it in bits_all if int(value) & (1 << it)]
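    # Worked example (illustrative comment, not original code):
    #   _value_to_bits(5, [0, 1, 2, 3]) -> [0, 2]   since 5 == 0b101;
    # a bit is reported as set whenever value & (1 << bit) is non-zero.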
def values_to_labels(self, values=None):
"""Convert mask values to a list of the labels of bits set.
Parameters:
values (int or array):
Mask values. If ``None``, apply to entire
``Maskbit.mask`` array. Default is ``None``.
Returns:
list:
                Labels of the bits that are set.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.values_to_labels()
[[['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE'],
...
['NOCOV', 'LOWCOV', 'NOVALUE', 'DONOTUSE']]]
"""
#bits_set = self.values_to_bits(values=values)
#labels_set = self._bits_to_labels(bits_set)
labels_set = self._get_a_set(values, convert_to='labels')
return labels_set
def _bits_to_labels(self, nested):
"""Recursively convert a nested list of bits to labels.
Parameters:
nested (list):
Nested list of bits.
Returns:
list: Nested list of labels.
"""
# Base condition
if isinstance(nested, (int, np.integer)):
return self.schema.label[self.schema.bit == nested].values[0]
return [self._bits_to_labels(it) for it in nested]
def labels_to_value(self, labels):
"""Convert bit labels into a bit value.
Parameters:
labels (str or list):
Labels of bits to set.
Returns:
int: Integer bit value.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
            >>> ha.pixmask.labels_to_value('DONOTUSE')
1073741824
            >>> ha.pixmask.labels_to_value(['NOCOV', 'LOWCOV'])
3
"""
if isinstance(labels, str):
labels = [labels]
bit_values = []
for label in labels:
bit = self.schema.bit[self.schema.label == label]
if not bit.empty:
bit_values.append(bit.values[0])
return np.sum([2**value for value in bit_values])
def labels_to_bits(self, labels):
"""Convert bit labels into bits.
Parameters:
labels (str or list):
Labels of bits.
Returns:
list: Bits that correspond to the labels.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.labels_to_bits('DONOTUSE')
[30]
            >>> ha.pixmask.labels_to_bits(['NOCOV', 'LOWCOV'])
[0, 1]
"""
return self.values_to_bits(self.labels_to_value(labels))
def get_mask(self, labels, mask=None, dtype=int):
"""Create mask from a list of labels.
If ``dtype`` is ``int``, then ``get_mask`` can effectively
perform an OR or AND operation. However, if ``dtype`` is
``bool``, then ``get_mask`` does an OR.
Parameters:
labels (str or list):
Labels of bits.
mask (int or array):
User-defined mask. If ``None``, use ``self.mask``.
Default is ``None``.
dtype:
Output dtype, which must be either ``int`` or ``bool``.
Default is ``int``.
Returns:
array: Mask for given labels.
Example:
>>> maps = Maps(plateifu='8485-1901')
>>> ha = maps['emline_gflux_ha_6564']
>>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'])
array([[3, 3, 3, ..., 3, 3, 3],
...,
[3, 3, 3, ..., 3, 3, 3]])
>>> ha.pixmask.get_mask(['NOCOV', 'LOWCOV'], dtype=bool)
array([[ True, True, True, ..., True, True, True],
...,
[ True, True, True, ..., True, True, True]], dtype=bool)
"""
assert dtype in [int, bool], '``dtype`` must be either ``int`` or ``bool``.'
if isinstance(labels, str):
labels = [labels]
schema_labels = self.schema.label.tolist()
for label in labels:
if label not in schema_labels:
raise ValueError('label {0!r} not found in the maskbit schema.'.format(label))
bits = self.labels_to_bits(labels)
mask = mask if mask is not None else self.mask
if len(bits) == 0:
return np.zeros(mask.shape, dtype=int)
return np.sum([mask & 2**bit for bit in bits], axis=0).astype(dtype)
| {
"content_hash": "c102d2ff532d161d8de6b173ad01073d",
"timestamp": "",
"source": "github",
"line_count": 395,
"max_line_length": 113,
"avg_line_length": 30.379746835443036,
"alnum_prop": 0.5294166666666666,
"repo_name": "sdss/marvin",
"id": "a427e6b4169664b67c4cf830ed463bd6ce968677",
"size": "12229",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "python/marvin/utils/general/maskbit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "166739"
},
{
"name": "HTML",
"bytes": "91250"
},
{
"name": "JavaScript",
"bytes": "247561"
},
{
"name": "PLpgSQL",
"bytes": "1577"
},
{
"name": "Python",
"bytes": "1706012"
},
{
"name": "SCSS",
"bytes": "266310"
},
{
"name": "Shell",
"bytes": "1150"
}
],
"symlink_target": ""
} |
import logging
from shove import Shove
logging.basicConfig(level=logging.DEBUG)
from plex import Plex
import os
def print_title(container):
print list(container)[0].title
if __name__ == '__main__':
# Without caching
print_title(Plex['library'].metadata(3))
print_title(Plex['library'].metadata(3))
print
# Build shove
cache_dir = os.path.abspath('cache')
print 'cache_dir: %r' % cache_dir
cache = Shove('file://%s' % cache_dir, 'memory://', optimize=False)
# With caching
print_title(Plex['library'].use(cache).metadata(3))
print_title(Plex['library'].use(cache).metadata(3))
# close cache (sync back to disk)
cache.close()
| {
"content_hash": "a2037cd3445630da1a12104aa6db8cb2",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 71,
"avg_line_length": 21.59375,
"alnum_prop": 0.6512301013024602,
"repo_name": "fuzeman/plex.py",
"id": "558c005a0b7947ae8cd3a8de0bb29a9fca2ff726",
"size": "691",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/caching.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "135510"
}
],
"symlink_target": ""
} |
from time import time
class LabelAnnealer(object):
"""Keeps track of how many labels we want to collect"""
def __init__(self, agent_logger, final_timesteps, final_labels, pretrain_labels):
self._agent_logger = agent_logger
self._final_timesteps = final_timesteps
self._final_labels = final_labels
self._pretrain_labels = pretrain_labels
@property
def n_desired_labels(self):
"""Return the number of labels desired at this point in training. """
exp_decay_frac = 0.01 ** (self._agent_logger._timesteps_elapsed / self._final_timesteps) # Decay from 1 to 0
pretrain_frac = self._pretrain_labels / self._final_labels
desired_frac = pretrain_frac + (1 - pretrain_frac) * (1 - exp_decay_frac) # Start with 0.25 and anneal to 0.99
return desired_frac * self._final_labels
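# Worked example for LabelAnnealer.n_desired_labels (illustrative numbers, not
# part of the original code): with final_labels=1000, pretrain_labels=250 and
# final_timesteps=1e6, the desired fraction starts at 0.25 when no timesteps
# have elapsed (exp_decay_frac == 1) and approaches 0.9925 of final_labels as
# timesteps_elapsed -> final_timesteps (exp_decay_frac -> 0.01).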
class ConstantLabelSchedule(object):
def __init__(self, pretrain_labels, seconds_between_labels=3.0):
self._started_at = None # Don't initialize until we call n_desired_labels
self._seconds_between_labels = seconds_between_labels
self._pretrain_labels = pretrain_labels
@property
def n_desired_labels(self):
if self._started_at is None:
self._started_at = time()
return self._pretrain_labels + (time() - self._started_at) / self._seconds_between_labels
| {
"content_hash": "1404e9d76364680b4fae7848e605b47e",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 119,
"avg_line_length": 45.93333333333333,
"alnum_prop": 0.6611030478955007,
"repo_name": "nottombrown/rl-teacher",
"id": "b56970fdbfd20f42e82b90643e3d152b7bfd1744",
"size": "1378",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rl_teacher/label_schedules.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "14594"
},
{
"name": "Python",
"bytes": "224250"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import filecmp
import os
a = r"\\DEQWQNAS01\Lidar08\LiDAR\BE"
b = r"P:\Willamette\BE"
# ``common`` was undefined in the original; build it (assumed intent) from the
# union of both directory listings so cmpfiles() has names to compare.
common = sorted(set(os.listdir(a)) | set(os.listdir(b)))
match, mismatch, err = filecmp.cmpfiles(a, b, common)
print(mismatch) | {
"content_hash": "753d2dbbe61e21379687208e530d604d",
"timestamp": "",
"source": "github",
"line_count": 9,
"max_line_length": 53,
"avg_line_length": 20.666666666666668,
"alnum_prop": 0.7258064516129032,
"repo_name": "rmichie/PyScripts",
"id": "3d0c6f5b45709585e4f280229382b39f354bd287",
"size": "187",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "directory_compare.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "267709"
}
],
"symlink_target": ""
} |
from Models.FeatureProcessing import *
import sklearn
from sklearn.linear_model import LogisticRegression
import numpy as np
import abc
from ClassificationModule import ClassificationModule
class lrreadmeonly(ClassificationModule):
"""A basic logistic regressor"""
def __init__(self, text_corpus):
ClassificationModule.__init__(self, "Readme Only Logistic Regressor", "A Logistic Regressor")
# Create vectorizer and fit on all available Descriptions
self.vectorizer = getTextVectorizer(9000) # Maximum of different columns
corpus = []
for readme in text_corpus:
corpus.append(process_text(readme))
self.vectorizer.fit(corpus)
self.clf = LogisticRegression(multi_class='multinomial', solver='lbfgs', class_weight = 'auto')
print "\t-", self.name
def resetAllTraining(self):
"""Reset classification module to status before training"""
self.clf = sklearn.base.clone(self.clf)
def trainOnSample(self, sample, shuffle=True, verbose=True):
"""Trainiere (inkrementell) mit Sample. Evtl zusätzlich mit best. Menge alter Daten, damit overfitten auf neue Daten verhindert wird."""
readme_vec = self.formatInputData(sample)
label_index = getLabelIndex(sample)
return self.clf.fit(readme_vec, np.expand_dims(label_index, axis=0))
def train(self, samples, shuffle=True, verbose=True):
"""Trainiere mit Liste von Daten. Evtl weitere Paramter nötig (nb_epoch, learning_rate, ...)"""
train_samples = []
train_lables = []
for sample in samples:
formatted_sample = self.formatInputData(sample)[0].tolist()
train_samples.append(formatted_sample)
train_lables.append(getLabelIndex(sample))
train_lables = np.asarray(train_lables)
train_result = self.clf.fit(train_samples, train_lables)
self.isTrained = True
return train_result
def predictLabel(self, sample):
"""Gibt zurück, wie der Klassifikator ein gegebenes Sample klassifizieren würde"""
if not self.isTrained:
return 0
sample = self.formatInputData(sample)
return self.clf.predict(sample)[0]
def predictLabelAndProbability(self, sample):
"""Return the probability the module assignes each label"""
if not self.isTrained:
return [0, 0, 0, 0, 0, 0, 0, 0]
sample = self.formatInputData(sample)
prediction = self.clf.predict_proba(sample)[0]
return [np.argmax(prediction)] + list(prediction)
def formatInputData(self, sample):
"""Extract readme and transform to vector"""
sd = getReadme(sample)
# Returns numpy array which contains 1 array with features
return self.vectorizer.transform([sd]).toarray()
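# Hypothetical usage sketch (sample structure and names are assumptions, not
# taken from the original project):
#   clf = lrreadmeonly(text_corpus=[readme_1, readme_2, ...])
#   clf.train(train_samples)  # samples in the format expected by getReadme()
#   probs = clf.predictLabelAndProbability(test_sample)
#   # probs == [predicted_label_index, p_class0, p_class1, ...]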
| {
"content_hash": "4378b19919ef259886f6a2104af1f619",
"timestamp": "",
"source": "github",
"line_count": 73,
"max_line_length": 144,
"avg_line_length": 39.0958904109589,
"alnum_prop": 0.665031534688157,
"repo_name": "Ichaelus/Github-Classifier",
"id": "b15f167a035c59e005be83baeef7831f39a5bbd2",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "Application/Models/ClassificationModules/lrreadmeonly.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "42"
},
{
"name": "Batchfile",
"bytes": "15"
},
{
"name": "CSS",
"bytes": "25708"
},
{
"name": "HTML",
"bytes": "43445"
},
{
"name": "JavaScript",
"bytes": "81211"
},
{
"name": "Jupyter Notebook",
"bytes": "45664"
},
{
"name": "PHP",
"bytes": "39760"
},
{
"name": "Python",
"bytes": "244259"
}
],
"symlink_target": ""
} |
"""This example gets all ad group criteria in an account.
To add keywords, run add_keywords.py.
Tags: AdGroupCriterionService.get
Api: AdWordsOnly
"""
__author__ = ('api.kwinter@gmail.com (Kevin Winter)'
'Joseph DiLallo')
from googleads import adwords
PAGE_SIZE = 500
def main(client):
# Initialize appropriate service.
ad_group_criterion_service = client.GetService(
'AdGroupCriterionService', version='v201309')
# Construct selector and get all ad group criteria.
offset = 0
selector = {
'fields': ['AdGroupId', 'Id', 'Text', 'KeywordMatchType', 'PlacementUrl'],
'predicates': [{
'field': 'CriteriaType',
'operator': 'EQUALS',
'values': ['KEYWORD']
}],
'paging': {
'startIndex': str(offset),
'numberResults': str(PAGE_SIZE)
}
}
more_pages = True
while more_pages:
page = ad_group_criterion_service.get(selector)
# Display results.
if 'entries' in page:
for criterion in page['entries']:
print ('Keyword ad group criterion with ad group id \'%s\', criterion '
'id \'%s\', text \'%s\', and match type \'%s\' was found.'
% (criterion['adGroupId'], criterion['criterion']['id'],
criterion['criterion']['text'],
criterion['criterion']['matchType']))
else:
print 'No keywords were found.'
offset += PAGE_SIZE
selector['paging']['startIndex'] = str(offset)
more_pages = offset < int(page['totalNumEntries'])
if __name__ == '__main__':
# Initialize client object.
adwords_client = adwords.AdWordsClient.LoadFromStorage()
main(adwords_client)
| {
"content_hash": "c02be6f2f5434eb92e4c90efe1b66834",
"timestamp": "",
"source": "github",
"line_count": 60,
"max_line_length": 80,
"avg_line_length": 28.083333333333332,
"alnum_prop": 0.6065281899109792,
"repo_name": "jdilallo/jdilallo-test",
"id": "683e2f2fae0c6888f1d35d97f9489a6320c3972d",
"size": "2303",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/adwords/v201309/basic_operations/get_keywords.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "722738"
}
],
"symlink_target": ""
} |
import Tkinter
from tkFileDialog import askopenfilename
class File_Loader():
def loadMP3(self):
        currentAudioFilePath = askopenfilename(filetypes=[("Sound files", "*.mp3 *.wav")])  # show an "Open" dialog box and return the path to the selected file
print(currentAudioFilePath)
return currentAudioFilePath | {
"content_hash": "f3f5890ed6e6b8759548c43c281d96e6",
"timestamp": "",
"source": "github",
"line_count": 10,
"max_line_length": 159,
"avg_line_length": 33.4,
"alnum_prop": 0.7245508982035929,
"repo_name": "Thomas-Stewart/RadioFrequencyInterpreter",
"id": "f15e542281e4d227219004b900f29b4a3e994c82",
"size": "334",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/file_loader.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "13832"
}
],
"symlink_target": ""
} |
import unittest
import os
import yaml
import imp
# add parent dir to search path
import sys
#sys.path.insert(0, "..")
f = None
try:
(f, pathname, description) = imp.find_module("monkeyYaml", [os.path.join(os.getcwd(), "../")])
module = imp.load_module("monkeyYaml", f, pathname, description)
monkeyYaml = module
except:
raise ImportError("Cannot load monkeyYaml")
finally:
if f:
f.close()
#import monkeyYaml
class TestMonkeyYAMLParsing(unittest.TestCase):
def test_empty(self):
self.assertEqual(monkeyYaml.load(""), yaml.load(""))
def test_newline(self):
self.assertEqual(monkeyYaml.load("\n"), yaml.load("\n"))
def test_oneline(self):
y = "foo: bar"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_twolines(self):
y = "foo: bar\nbaz_bletch : blith:er"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_multiLine(self):
y = "foo: >\n bar\nbaz: 3"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_es5id(self):
y = "es5id: 15.2.3.6-4-102"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_Multiline_1(self):
lines = [" foo"]
value = ">"
y = "\n".join([value] + lines)
(lines, value) = monkeyYaml.myMultiline(lines, value)
self.assertEqual(lines, [])
self.assertEqual(value, yaml.load(y))
def test_Multiline_2(self):
lines = [" foo", " bar"]
value = ">"
y = "\n".join([value] + lines)
(lines, value) = monkeyYaml.myMultiline(lines, value)
self.assertEqual(lines, [])
self.assertEqual(value, yaml.load(y))
def test_Multiline_3(self):
lines = [" foo", " bar"]
value = ">"
y = "\n".join([value] + lines)
(lines, value) = monkeyYaml.myMultiline(lines, value)
self.assertEqual(lines, [])
self.assertEqual(value, yaml.load(y))
def test_Multiline_4(self):
lines = [" foo", " bar", " other: 42"]
value = ">"
(lines, value) = monkeyYaml.myMultiline(lines, value)
self.assertEqual(lines, [" other: 42"])
self.assertEqual(value, "foo bar")
def test_myLeading(self):
self.assertEqual(2, monkeyYaml.myLeadingSpaces(" foo"))
self.assertEqual(2, monkeyYaml.myLeadingSpaces(" "))
self.assertEqual(0, monkeyYaml.myLeadingSpaces("\t "))
def test_includes_flow(self):
y = "includes: [a.js,b.js, c_with_wings.js]\n"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_myFlowList_1(self):
y = "[a.js,b.js, c_with_wings.js, 3, 4.12]"
self.assertEqual(monkeyYaml.myFlowList(y), ['a.js', 'b.js', 'c_with_wings.js', 3, 4.12])
def test_multiline_list_1(self):
y = "foo:\n - bar\n - baz"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_multiline_list2(self):
self.assertEqual(monkeyYaml.myRemoveListHeader(2, " - foo"), "foo")
def test_multiline_list3(self):
(lines, value) = monkeyYaml.myMultilineList([" - foo", " - bar", "baz: bletch"], "")
self.assertEqual(lines, ["baz: bletch"])
self.assertEqual(value, ["foo", "bar"])
def test_multiline_list_carriage_return(self):
y = "foo:\r\n - bar\r\n - baz"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_oneline_indented(self):
y = " foo: bar\n baz: baf\n"
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_indentation_215(self):
self.maxDiff = None
y = """
description: >
The method should exist on the Array prototype, and it should be writable
and configurable, but not enumerable.
includes: [propertyHelper.js]
es6id: 22.1.3.13
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_indentation_215_2(self):
self.maxDiff = None
y = """
description: >
The method should exist
includes: [propertyHelper.js]
es6id: 22.1.3.13
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_line_folding(self):
self.maxDiff = None
y = """
description: aaa
bbb
es6id: 19.1.2.1
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_line_folding_2(self):
self.maxDiff = None
y = """
description: ccc
ddd
es6id: 19.1.2.1
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_line_folding_3(self):
self.maxDiff = None
y = """
description: eee
fff
es6id: 19.1.2.1
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
def test_line_folding_4(self):
self.maxDiff = None
y = """
description: ggg
hhh
iii
jjj
es6id: 19.1.2.1
"""
self.assertEqual(monkeyYaml.load(y), yaml.load(y))
if __name__ == '__main__':
unittest.main()
| {
"content_hash": "95391f5fd8b10afa6c72c6f9136976d9",
"timestamp": "",
"source": "github",
"line_count": 180,
"max_line_length": 98,
"avg_line_length": 27.677777777777777,
"alnum_prop": 0.5826977117623444,
"repo_name": "baslr/ArangoDB",
"id": "64744f64e0b43cb164e2e4f029453749d3bc17b1",
"size": "5129",
"binary": false,
"copies": "3",
"ref": "refs/heads/3.1-silent",
"path": "3rdParty/V8/V8-5.0.71.39/test/test262/data/tools/packaging/test/test_monkeyYaml.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Ada",
"bytes": "89080"
},
{
"name": "Assembly",
"bytes": "391227"
},
{
"name": "Awk",
"bytes": "4272"
},
{
"name": "Batchfile",
"bytes": "62892"
},
{
"name": "C",
"bytes": "7932707"
},
{
"name": "C#",
"bytes": "96430"
},
{
"name": "C++",
"bytes": "284363933"
},
{
"name": "CLIPS",
"bytes": "5291"
},
{
"name": "CMake",
"bytes": "681903"
},
{
"name": "CSS",
"bytes": "1036656"
},
{
"name": "CWeb",
"bytes": "174166"
},
{
"name": "Cuda",
"bytes": "52444"
},
{
"name": "DIGITAL Command Language",
"bytes": "259402"
},
{
"name": "Emacs Lisp",
"bytes": "14637"
},
{
"name": "Fortran",
"bytes": "1856"
},
{
"name": "Groovy",
"bytes": "131"
},
{
"name": "HTML",
"bytes": "2318016"
},
{
"name": "Java",
"bytes": "2325801"
},
{
"name": "JavaScript",
"bytes": "67878359"
},
{
"name": "LLVM",
"bytes": "24129"
},
{
"name": "Lex",
"bytes": "1231"
},
{
"name": "Lua",
"bytes": "16189"
},
{
"name": "M4",
"bytes": "600550"
},
{
"name": "Makefile",
"bytes": "509612"
},
{
"name": "Max",
"bytes": "36857"
},
{
"name": "Module Management System",
"bytes": "1545"
},
{
"name": "NSIS",
"bytes": "28404"
},
{
"name": "Objective-C",
"bytes": "19321"
},
{
"name": "Objective-C++",
"bytes": "2503"
},
{
"name": "PHP",
"bytes": "98503"
},
{
"name": "Pascal",
"bytes": "145688"
},
{
"name": "Perl",
"bytes": "720157"
},
{
"name": "Perl 6",
"bytes": "9918"
},
{
"name": "Python",
"bytes": "5859911"
},
{
"name": "QMake",
"bytes": "16692"
},
{
"name": "R",
"bytes": "5123"
},
{
"name": "Rebol",
"bytes": "354"
},
{
"name": "Roff",
"bytes": "1010686"
},
{
"name": "Ruby",
"bytes": "922159"
},
{
"name": "SAS",
"bytes": "1847"
},
{
"name": "Scheme",
"bytes": "10604"
},
{
"name": "Shell",
"bytes": "511077"
},
{
"name": "Swift",
"bytes": "116"
},
{
"name": "Tcl",
"bytes": "1172"
},
{
"name": "TeX",
"bytes": "32117"
},
{
"name": "Vim script",
"bytes": "4075"
},
{
"name": "Visual Basic",
"bytes": "11568"
},
{
"name": "XSLT",
"bytes": "551977"
},
{
"name": "Yacc",
"bytes": "53005"
}
],
"symlink_target": ""
} |
from flask.ext.wtf import Form
from flask.ext.babel import gettext
from wtforms import TextField, BooleanField, RadioField
from wtforms.validators import Required, Regexp, Optional
from wtforms import IntegerField, HiddenField
from wtforms import ValidationError
from wtforms.validators import StopValidation
from wtforms import SelectField
from app.models import Question, QuestionChoice, QuestionYN, QuestionText,Answer, QuestionLikertScale
from flask import g, flash
from app import db
from utiles import generate_answer
class LikertField(RadioField):
    '''My implementation of a Likert scale field.'''
def __init__(self, label='', validators=None, labelMin="", labelMax="", **kwargs):
self.labelMin=labelMin
self.labelMax=labelMax
super(LikertField, self).__init__(label, validators, **kwargs)
def __call__(self, **kwargs):
        '''Render the Likert field as a table
'''
from wtforms.widgets.core import html_params, HTMLString
kwargs.setdefault('id', self.id)
kwargs.setdefault('class_', " table table-condensed likert")
html = ['<%s %s>' % ("table", html_params(**kwargs))]
html.append('<tr>')
html.append('<td></td>')
for subfield in self:
html.append('<td>%s</td>' % (subfield.label))
html.append('</tr>')
html.append('<tr>')
html.append('<td class="type-info">%s</td>' % (self.labelMin))
for subfield in self:
html.append('<td>%s</td>' % (subfield()))
html.append('<td class="type-info">%s</td>' % (self.labelMax))
html.append('</tr>')
html.append('</%s>' % "table")
return HTMLString(''.join(html))
# return super(RadioFeild, self).__call__(**kwargs)
def __call1__(self, **kwargs):
        '''Render the Likert field as a list
'''
from wtforms.widgets.core import html_params, HTMLString
kwargs.setdefault('id', self.id)
kwargs.setdefault('class_', "likert")
html = ['<%s %s>' % (self.widget.html_tag, html_params(**kwargs))]
html.append('<li>%s</li>' % (self.labelMin))
for subfield in self:
if self.widget.prefix_label:
html.append('<li>%s %s</li>' % (subfield.label, subfield()))
else:
html.append('<li>%s %s</li>' % (subfield(), subfield.label))
html.append('<li>%s</li>' % (self.labelMax))
html.append('</%s>' % self.widget.html_tag)
return HTMLString(''.join(html))
# return super(RadioField, self).__call__(**kwargs)
class MyRadioField(RadioField):
def __init__(self, label='', validators=None, horizontal=False,**kwargs):
self.horizontal=horizontal
# kwargs.setdefault('coerce', "int")
super(MyRadioField, self).__init__(label, validators, **kwargs)
def __call__(self, **kwargs):
if self.horizontal:
# kwargs.setdefault('class_', "radioField_horizontal")
self.widget.prefix_label=True
from wtforms.widgets.core import html_params, HTMLString
kwargs.setdefault('id', self.id)
kwargs.setdefault('class_', " table table-condensed likert")
html = ['<%s %s>' % ("table", html_params(**kwargs))]
html.append('<tr>')
for subfield in self:
html.append('<td>%s</td>' % (subfield.label))
html.append('</tr>')
html.append('<tr>')
for subfield in self:
html.append('<td>%s</td>' % (subfield()))
html.append('</tr>')
html.append('</%s>' % "table")
return HTMLString(''.join(html))
else:
kwargs.setdefault('class_', "radio")
self.widget.prefix_label=False
return super(MyRadioField, self).__call__(**kwargs)
class CheckAnswerExpected(object):
    '''Check whether the given answer matches the expected one
'''
def __init__(self, message=None):
if not message:
self.message = gettext("Respuesta incorrecta")
else: # pragma: no cover
self.message = 'Respuesta incorrecta. ' + message
self.message_continue = gettext("Respuesta incorrecta, puedes continuar")
def __call__(self, form, field):
question = Question.query.get(field.name[1:])
answer = generate_answer(question, form, g.user)
db.session.add(answer)
db.session.commit()
if not answer.answerAttempt():
if answer.isMoreAttempt():
flash("Respuesta incorrecta")
raise ValidationError(self.message)
else:
flash(self.message_continue)
class CheckSubquestion(object):
    '''Check whether the subquestion needs to be answered, based on the parent's answer
'''
def __call__(self, form, field):
question = Question.query.get(field.name[1:])
data = form["c"+str(question.parent.id)].data
if isinstance (question.parent,QuestionYN):
if data.lower()==question.condition.value.lower():
pass
# raise ValidationError('This field is required.')
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if isinstance (question.parent,QuestionText) or \
isinstance(question.parent,QuestionChoice):
if question.condition.operation=="<":
if data<question.condition.value:
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if question.condition.operation=="==":
if data==question.condition.value:
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
if question.condition.operation==">":
if int(data)>int(question.condition.value):
pass
else:
# nothing to check
field.errors[:] = []
raise StopValidation()
class RequiredSelectField(object):
    '''Check that an answer option was actually selected
'''
def __init__(self, message=None):
if not message:
self.message = gettext("Option not valid")
else: # pragma: no cover
self.message = message
def __call__(self, form, field):
if field.data=="":
raise ValidationError(gettext("Option not valid"))
def generate_form(questions):
'''dynamically generates the forms for surveys
'''
def frange(x, y, jump):
        '''Float-aware variant of range() that yields formatted strings
'''
while x < y:
if x % 1 == 0:
yield '%.0f' % x
else:
yield '%.2f' % x
# yield '{0:g}'.format(float(x))
x += jump
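    # Example of the helper above (comment only): list(frange(1, 3.5, 0.5))
    # yields ['1', '1.50', '2', '2.50', '3'] -- whole numbers are formatted
    # without decimals, all other values with two decimal places.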
class AnswerForm(Form):
time = HiddenField('time',default=0)
for question in questions:
setattr(AnswerForm,"globalTimec"+str(question.id),HiddenField('globalTimec'+str(question.id),default=0))
setattr(AnswerForm,"differentialTimec"+str(question.id),HiddenField('differentialTimec'+str(question.id),default=0))
#added "c" to that the key is valid
#First element must be a string, otherwise fail to valid choice
if isinstance (question,QuestionYN):
choices = [('Yes',gettext('Yes')),('No',gettext('No'))]
if question.isSubquestion:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [CheckSubquestion()]))
else:
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices, validators = [Required(),CheckAnswerExpected(message=question.help_text)]))
elif question.required:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [Required()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
choices = choices,validators = [Optional()]))
if isinstance (question,QuestionText):
if question.isSubquestion:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',
validators = [CheckSubquestion()]))
else:
if question.required:
if question.regularExpression !="":
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Required(), Regexp(question.regularExpression,0,question.errorMessage),
CheckAnswerExpected(message=question.help_text)]))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Required(), Regexp(question.regularExpression,0,question.errorMessage)]))
elif question.isNumber:
if question.isExpectedAnswer():
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',validators = [Required(),
CheckAnswerExpected(message=question.help_text)]))
else:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer'))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',validators = [Required()]))
else:
if question.regularExpression !="":
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',
validators=[Optional(), Regexp(question.regularExpression,0,question.errorMessage)]))
elif question.isNumber:
setattr(AnswerForm,"c"+str(question.id),IntegerField('Answer',validators = [Optional()]))
else:
setattr(AnswerForm,"c"+str(question.id),TextField('Answer',validators = [Optional()]))
if isinstance (question,QuestionChoice):
if question.is_range:
list = [(str(index),choice) for index,choice in enumerate(
frange(question.range_min,
question.range_max+question.range_step,
question.range_step))]
else:
list = [(str(index),choice) for index, choice in enumerate(question.choices)]
if question.render == "select":
list.insert(0,("",""))
if question.isSubquestion:
if question.render=="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField(),CheckSubquestion()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [CheckSubquestion()]))
else:
if question.required:
if question.render =="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [Required()]))
else:
if question.render =="select":
setattr(AnswerForm,"c"+str(question.id),SelectField('Answer',
choices = list,validators = [RequiredSelectField()]))
else:
setattr(AnswerForm,"c"+str(question.id),MyRadioField('Answer',
horizontal=question.render=="horizontal",
choices = list,validators = [Optional()]))
if isinstance (question, QuestionLikertScale):
list = [(str(index),choice) for index,choice in enumerate(range(question.minLikert,question.maxLikert+1))]
if question.required:
setattr(AnswerForm,"c"+str(question.id),LikertField('Answer',
choices = list,
labelMin= question.labelMin,
labelMax=question.labelMax,
validators = [Required()]))
else:
setattr(AnswerForm,"c"+str(question.id),RadioField('Answer',
choices = list,validators = [Optional()]))
form = AnswerForm()
return form | {
"content_hash": "771b01b596ed201191f8f5107bc7d814",
"timestamp": "",
"source": "github",
"line_count": 287,
"max_line_length": 124,
"avg_line_length": 45.64459930313589,
"alnum_prop": 0.5426717557251909,
"repo_name": "nukru/projectQ",
"id": "ff55b80a9cef6abaef8dd2562e2e87fd74d4bca0",
"size": "13100",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/surveys/forms.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "31508"
},
{
"name": "JavaScript",
"bytes": "195718"
},
{
"name": "PHP",
"bytes": "1082"
},
{
"name": "Python",
"bytes": "283141"
},
{
"name": "Shell",
"bytes": "46"
}
],
"symlink_target": ""
} |
extensions = [
'openstackdocstheme',
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
author = 'OpenStack-Ansible Contributors'
category = 'Miscellaneous'
copyright = '2014-2016, OpenStack-Ansible Contributors'
description = 'OpenStack-Ansible deploys OpenStack environments using Ansible.'
project = 'OpenStack-Ansible'
role_name = 'rsyslog_server'
target_name = 'openstack-ansible-' + role_name
title = 'OpenStack-Ansible Documentation: ' + role_name + ' role'
# openstackdocstheme options
openstackdocs_repo_name = 'openstack/' + target_name
openstackdocs_bug_project = project.lower()
openstackdocs_bug_tag = ''
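# With role_name = 'rsyslog_server' the derived values above resolve to, e.g.,
# target_name == 'openstack-ansible-rsyslog_server' and
# openstackdocs_repo_name == 'openstack/openstack-ansible-rsyslog_server'.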
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'openstackdocs'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = target_name + '-docs'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, target_name + '.tex',
title, author, 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, target_name,
title, [author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, target_name,
title, author, project,
description, category),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
| {
"content_hash": "b87c90b1a723502b30133e60b5b56d1d",
"timestamp": "",
"source": "github",
"line_count": 253,
"max_line_length": 79,
"avg_line_length": 32.32806324110672,
"alnum_prop": 0.7009414353832987,
"repo_name": "openstack/openstack-ansible-rsyslog_server",
"id": "b39ce1792ad9856874cc1e9029baae99fd01b030",
"size": "9633",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "doc/source/conf.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Jinja",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "8923"
},
{
"name": "Shell",
"bytes": "3495"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Asset',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('snow_id', models.CharField(help_text='ServiceNow database ID', max_length=255)),
],
),
migrations.CreateModel(
name='Domain',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(help_text='partially qualified domain name', max_length=255, unique=True)),
],
),
migrations.CreateModel(
name='DomainName',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
],
),
migrations.CreateModel(
name='Geo',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
],
),
migrations.CreateModel(
name='HardwareType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='NetInterface',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30)),
('mac_address', models.CharField(max_length=18)),
],
),
migrations.CreateModel(
name='NetIPAddress',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('ip', models.GenericIPAddressField()),
('interface', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.NetInterface')),
],
),
migrations.CreateModel(
name='OSDistribution',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='OSDistributionMajorVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
('version', models.IntegerField()),
('os_distribution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.OSDistribution')),
],
),
migrations.CreateModel(
name='OSDistributionVersion',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('version', models.CharField(max_length=5)),
('os_distribution', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.OSDistribution')),
('os_distribution_major_version', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.OSDistributionMajorVersion')),
],
),
migrations.CreateModel(
name='OSFamily',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=30, unique=True)),
],
),
migrations.CreateModel(
name='Host',
fields=[
('asset_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='inventory.Asset')),
('local_name', models.CharField(max_length=30)),
('dns_name', models.CharField(max_length=255)),
('vm_name', models.CharField(max_length=30)),
('domain', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.Domain')),
('hardware_type', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.HardwareType')),
],
bases=('inventory.asset',),
),
migrations.AddField(
model_name='osdistribution',
name='os_family',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.OSFamily'),
),
migrations.AddField(
model_name='domainname',
name='ip_address',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.NetIPAddress'),
),
migrations.AddField(
model_name='asset',
name='geo',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.Geo'),
),
migrations.AlterUniqueTogether(
name='osdistributionversion',
unique_together=set([('os_distribution', 'version'), ('os_distribution_major_version', 'version')]),
),
migrations.AlterUniqueTogether(
name='osdistributionmajorversion',
unique_together=set([('os_distribution', 'version')]),
),
migrations.AddField(
model_name='netinterface',
name='host',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.Host'),
),
migrations.AddField(
model_name='host',
name='os_distribution',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.OSDistribution'),
),
migrations.AddField(
model_name='host',
name='os_distribution_version',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.OSDistributionVersion'),
),
migrations.AddField(
model_name='host',
name='os_family',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.OSFamily'),
),
migrations.AddField(
model_name='host',
name='parent_host',
field=models.ForeignKey(help_text='VM parent host', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='guest', to='inventory.Host'),
),
]
| {
"content_hash": "ab591e351127280473b9fcefc38701f4",
"timestamp": "",
"source": "github",
"line_count": 158,
"max_line_length": 191,
"avg_line_length": 45.360759493670884,
"alnum_prop": 0.5729035858797266,
"repo_name": "H0neyBadger/cmdb",
"id": "e3e098c454ff1c5719110bbfd0f8535ada4f76a8",
"size": "7240",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "inventory/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "41045"
},
{
"name": "Shell",
"bytes": "136"
}
],
"symlink_target": ""
} |
"""BayesianOptimization is used""" | {
"content_hash": "8f4664d418af5a4c3df5369d1266eb99",
"timestamp": "",
"source": "github",
"line_count": 1,
"max_line_length": 34,
"avg_line_length": 34,
"alnum_prop": 0.7647058823529411,
"repo_name": "odb9402/OPPA",
"id": "163e0893a104b7eef19cf27a2d91dba3129113f6",
"size": "34",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "oppa/BayesianOptimization/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "C++",
"bytes": "5362"
},
{
"name": "Python",
"bytes": "99784"
},
{
"name": "R",
"bytes": "37828"
},
{
"name": "Shell",
"bytes": "576"
}
],
"symlink_target": ""
} |
"""Auto-generated file, do not edit by hand. 800 metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_800 = PhoneMetadata(id='001', country_code=800, international_prefix=None,
general_desc=PhoneNumberDesc(national_number_pattern='(?:00|[1-9]\\d)\\d{6}', possible_length=(8,)),
toll_free=PhoneNumberDesc(national_number_pattern='(?:00|[1-9]\\d)\\d{6}', example_number='12345678', possible_length=(8,)),
number_format=[NumberFormat(pattern='(\\d{4})(\\d{4})', format='\\1 \\2', leading_digits_pattern=['\\d'])])
| {
"content_hash": "9d06c71bc80aa5b32764dbeb7ed6e736",
"timestamp": "",
"source": "github",
"line_count": 7,
"max_line_length": 128,
"avg_line_length": 81.57142857142857,
"alnum_prop": 0.6952714535901926,
"repo_name": "daviddrysdale/python-phonenumbers",
"id": "5c5026dbec1349080625bf54dda73ffcb8772f2d",
"size": "571",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "python/phonenumbers/data/region_800.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Java",
"bytes": "3898"
},
{
"name": "Makefile",
"bytes": "9034"
},
{
"name": "Python",
"bytes": "22052087"
},
{
"name": "Ruby",
"bytes": "237"
}
],
"symlink_target": ""
} |
import getpass
import os
os.system("clear")
pw = getpass.getpass()
minuscula=0
mayuscula=0
numero=0
especial =0
if (len(pw)<5) or (len(pw)>15):
print("Error, out of range")
for i in pw:
    if (ord(i)>47) and (ord(i)<58):      # '0'-'9': digit
        numero = 1
    elif (ord(i)>64) and (ord(i)<91):    # 'A'-'Z': uppercase letter
        mayuscula = 1
    elif (ord(i)>96) and (ord(i)<123):   # 'a'-'z': lowercase letter
        minuscula = 1
    else:                                # anything else counts as a special character
        especial = 1
if (minuscula == 1 and mayuscula == 0 and numero == 0 and especial ==0) or (minuscula == 0 and mayuscula == 1 and numero == 0 and especial ==0) or (minuscula == 0 and mayuscula == 0 and numero == 1 and especial ==0):
print("nivel 1")
elif (minuscula == 1 and mayuscula == 1 and numero == 0 and especial ==0) or (minuscula == 1 and mayuscula == 0 and numero == 1 and especial ==0) or (minuscula == 0 and mayuscula == 1 and numero == 1 and especial ==0):
print("nivel 2")
elif(minuscula == 1 and mayuscula == 0 and numero == 0 and especial ==1) or (minuscula == 0 and mayuscula == 1 and numero == 0 and especial ==1) or (minuscula == 0 and mayuscula == 0 and numero == 1 and especial ==1):
print("nivel 3")
elif(minuscula == 1 and mayuscula == 1 and numero == 1 and especial ==0):
print("nivel 4")
elif(minuscula == 1 and mayuscula == 1 and numero == 1 and especial ==1):
print("nivel 5")
print(pw)
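# Illustrative alternative (not part of the original script; only roughly
# equivalent, for ASCII input): the same character-class flags can be derived
# with str methods instead of ord() ranges, e.g.
#
#     numero    = 1 if any(c.isdigit() for c in pw) else 0
#     mayuscula = 1 if any(c.isupper() for c in pw) else 0
#     minuscula = 1 if any(c.islower() for c in pw) else 0
#     especial  = 1 if any(not c.isalnum() for c in pw) else 0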
| {
"content_hash": "97c774ff2c5c00f0166926d2ee5952ad",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 218,
"avg_line_length": 43.8,
"alnum_prop": 0.619482496194825,
"repo_name": "Am3ra/CS",
"id": "308d5a389e0e21091c36223a3389ca697fda9bf4",
"size": "1314",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CS101/Python/pass.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Assembly",
"bytes": "4379"
},
{
"name": "C",
"bytes": "97085"
},
{
"name": "C++",
"bytes": "6575"
},
{
"name": "CSS",
"bytes": "485"
},
{
"name": "HTML",
"bytes": "4137"
},
{
"name": "Java",
"bytes": "345185"
},
{
"name": "JavaScript",
"bytes": "1206"
},
{
"name": "Python",
"bytes": "59876"
},
{
"name": "Rust",
"bytes": "28"
}
],
"symlink_target": ""
} |
"""Common settings and globals."""
from os.path import abspath, basename, dirname, join, normpath
from sys import path
########## PATH CONFIGURATION
# Absolute filesystem path to the Django project directory:
DJANGO_ROOT = dirname(dirname(abspath(__file__)))
# Absolute filesystem path to the top-level project folder:
SITE_ROOT = dirname(DJANGO_ROOT)
# Site name:
SITE_NAME = basename(DJANGO_ROOT)
# Add our project to our pythonpath, this way we don't need to type our project
# name in our dotted import paths:
path.append(DJANGO_ROOT)
########## END PATH CONFIGURATION
########## DEBUG CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#debug
DEBUG = False
########## END DEBUG CONFIGURATION
########## MANAGER CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#admins
ADMINS = (
('Your Name', 'your_email@example.com'),
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#managers
MANAGERS = ADMINS
########## END MANAGER CONFIGURATION
########## DATABASE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.',
'NAME': '',
'USER': '',
'PASSWORD': '',
'HOST': '',
'PORT': '',
}
}
########## END DATABASE CONFIGURATION
########## GENERAL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#time-zone
TIME_ZONE = 'America/Los_Angeles'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#language-code
LANGUAGE_CODE = 'en-us'
# See: https://docs.djangoproject.com/en/dev/ref/settings/#site-id
SITE_ID = 1
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-i18n
USE_I18N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-l10n
USE_L10N = True
# See: https://docs.djangoproject.com/en/dev/ref/settings/#use-tz
USE_TZ = True
########## END GENERAL CONFIGURATION
########## MEDIA CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-root
MEDIA_ROOT = normpath(join(SITE_ROOT, 'media'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#media-url
MEDIA_URL = '/media/'
########## END MEDIA CONFIGURATION
########## STATIC FILE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-root
STATIC_ROOT = normpath(join(SITE_ROOT, 'assets'))
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = '/static/'
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#std:setting-STATICFILES_DIRS
STATICFILES_DIRS = (
normpath(join(SITE_ROOT, 'static')),
)
# See: https://docs.djangoproject.com/en/dev/ref/contrib/staticfiles/#staticfiles-finders
STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.FileSystemFinder',
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
)
########## END STATIC FILE CONFIGURATION
########## SECRET CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key should only be used for development and testing.
SECRET_KEY = r"ps%bhx8w6xy!3!skp6docj*%mx5049v+xznyv0(u)-uqbs6=5b"
########## END SECRET CONFIGURATION
########## SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = []
########## END SITE CONFIGURATION
########## FIXTURE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#std:setting-FIXTURE_DIRS
FIXTURE_DIRS = (
normpath(join(SITE_ROOT, 'fixtures')),
)
########## END FIXTURE CONFIGURATION
########## TEMPLATE CONFIGURATION
# see: https://docs.djangoproject.com/en/1.9/ref/templates/upgrading/
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [normpath(join(SITE_ROOT, 'templates')), ],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.static',
'django.template.context_processors.tz',
                'django.contrib.messages.context_processors.messages',
'django.template.context_processors.request'
],
},
},
]
########## END TEMPLATE CONFIGURATION
########## MIDDLEWARE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#middleware-classes
MIDDLEWARE_CLASSES = (
# Default Django middleware.
'django.middleware.common.CommonMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
########## END MIDDLEWARE CONFIGURATION
########## URL CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#root-urlconf
ROOT_URLCONF = '%s.urls' % SITE_NAME
########## END URL CONFIGURATION
########## APP CONFIGURATION
DJANGO_APPS = (
# Default Django apps:
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
# Useful template tags:
# 'django.contrib.humanize',
# Admin panel and documentation:
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# Apps specific for this project go here.
LOCAL_APPS = (
'store',
'article',
)
# See: https://docs.djangoproject.com/en/dev/ref/settings/#installed-apps
INSTALLED_APPS = DJANGO_APPS + LOCAL_APPS
########## END APP CONFIGURATION
########## LOGGING CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#logging
# A sample logging configuration. The only tangible logging
# performed by this configuration is to send an email to
# the site admins on every HTTP 500 error when DEBUG=False.
# See http://docs.djangoproject.com/en/dev/topics/logging for
# more details on how to customize your logging configuration.
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'handlers': {
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler'
}
},
'loggers': {
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
}
}
########## END LOGGING CONFIGURATION
########## WSGI CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#wsgi-application
WSGI_APPLICATION = '%s.wsgi.application' % SITE_NAME
########## END WSGI CONFIGURATION
########## REST CONFIGURATION
INSTALLED_APPS += (
'rest_framework',
)
REST_FRAMEWORK = {
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.BasicAuthentication',
'rest_framework.authentication.SessionAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': ('rest_framework.permissions.IsAdminUser',),
'PAGE_SIZE': 10
}
########## END REST CONFIGURATION
# #### CRISPY CONFIGURATION
INSTALLED_APPS += (
'crispy_forms',
)
CRISPY_TEMPLATE_PACK = 'bootstrap3'
CRISPY_FAIL_SILENTLY = not DEBUG
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
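########## SETTINGS LAYOUT NOTE (illustrative)
# A minimal sketch, assuming the usual base/environment split for this settings
# package (not part of the original file): environment-specific modules such as
# settings/local.py typically start from this base configuration, e.g.
#
#     from .base import *
#     DEBUG = True
#     ALLOWED_HOSTS = ['localhost']
#
# and override only the values that differ from base.py.
########## END SETTINGS LAYOUT NOTE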
| {
"content_hash": "f68e5bb4dc5b28b64561d0e445269272",
"timestamp": "",
"source": "github",
"line_count": 265,
"max_line_length": 98,
"avg_line_length": 29.30943396226415,
"alnum_prop": 0.6642204197244753,
"repo_name": "smillerpy/shoes",
"id": "b47323d4a95072aef97645af9181230155ad0277",
"size": "7767",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "super_shoes/super_shoes/settings/base.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "39"
},
{
"name": "HTML",
"bytes": "3351"
},
{
"name": "JavaScript",
"bytes": "45"
},
{
"name": "Python",
"bytes": "31007"
}
],
"symlink_target": ""
} |
from modelmapper.declarations import Mapper, Field
from modelmapper.qt.fields import QLineEditAccessor
class String(QLineEditAccessor):
def get_value(self):
return str(self.widget.text())
def set_value(self, value):
self.widget.setText(str(value))
class Integer(QLineEditAccessor):
def get_value(self):
return int(self.widget.text())
def set_value(self, value):
        self.widget.setText(str(int(value)))  # QLineEdit.setText expects a string
def get_child_x_mapper(x):
return {
'{}_link'.format(x): (x, 'val_{}'.format(x))
}
def get_d_mapper():
return {
'expediente_link': Mapper('c[0]', 'val_c[0]', get_child_x_mapper('a')),
'masa_bruta_link': Mapper('c[1]', 'val_c[1]', get_child_x_mapper('b')),
'nombre_link': Field('cc', 'val_cc'),
}
def get_model_mapper():
return {
'expediente_link': Field('expediente', String('expediente')),
'masa_bruta_link': Field('masa_bruta', Integer('masa_bruta')),
'nombre_link': Field('nombre', String('nombre'))
}
| {
"content_hash": "e85b71a8c5aa87e4f5926e88a3c61499",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 79,
"avg_line_length": 24.761904761904763,
"alnum_prop": 0.6057692307692307,
"repo_name": "franramirez688/model-mapper",
"id": "2e1ea26357eac72644bfd8a4e511ade260d1740c",
"size": "1040",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/factory/qt/mapper_data.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "62221"
}
],
"symlink_target": ""
} |
"""Test to verify that we can load components."""
# pylint: disable=protected-access
import asyncio
import unittest
import pytest
import homeassistant.loader as loader
import homeassistant.components.http as http
from tests.common import (
get_test_home_assistant, MockModule, async_mock_service)
class TestLoader(unittest.TestCase):
"""Test the loader module."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up tests."""
self.hass = get_test_home_assistant()
# pylint: disable=invalid-name
def tearDown(self):
"""Stop everything that was started."""
self.hass.stop()
def test_set_component(self):
"""Test if set_component works."""
comp = object()
loader.set_component(self.hass, 'switch.test_set', comp)
assert loader.get_component(self.hass, 'switch.test_set') is comp
def test_get_component(self):
"""Test if get_component works."""
self.assertEqual(http, loader.get_component(self.hass, 'http'))
self.assertIsNotNone(loader.get_component(self.hass, 'light.hue'))
def test_load_order_component(self):
"""Test if we can get the proper load order of components."""
loader.set_component(self.hass, 'mod1', MockModule('mod1'))
loader.set_component(self.hass, 'mod2', MockModule('mod2', ['mod1']))
loader.set_component(self.hass, 'mod3', MockModule('mod3', ['mod2']))
self.assertEqual(
['mod1', 'mod2', 'mod3'],
loader.load_order_component(self.hass, 'mod3'))
# Create circular dependency
loader.set_component(self.hass, 'mod1', MockModule('mod1', ['mod3']))
self.assertEqual([], loader.load_order_component(self.hass, 'mod3'))
# Depend on non-existing component
loader.set_component(self.hass, 'mod1',
MockModule('mod1', ['nonexisting']))
self.assertEqual([], loader.load_order_component(self.hass, 'mod1'))
# Try to get load order for non-existing component
self.assertEqual([], loader.load_order_component(self.hass, 'mod1'))
def test_component_loader(hass):
"""Test loading components."""
components = loader.Components(hass)
assert components.http.CONFIG_SCHEMA is http.CONFIG_SCHEMA
assert hass.components.http.CONFIG_SCHEMA is http.CONFIG_SCHEMA
def test_component_loader_non_existing(hass):
"""Test loading components."""
components = loader.Components(hass)
with pytest.raises(ImportError):
components.non_existing
@asyncio.coroutine
def test_component_wrapper(hass):
"""Test component wrapper."""
calls = async_mock_service(hass, 'light', 'turn_on')
components = loader.Components(hass)
components.light.async_turn_on('light.test')
yield from hass.async_block_till_done()
assert len(calls) == 1
@asyncio.coroutine
def test_helpers_wrapper(hass):
"""Test helpers wrapper."""
helpers = loader.Helpers(hass)
result = []
def discovery_callback(service, discovered):
"""Handle discovery callback."""
result.append(discovered)
helpers.discovery.async_listen('service_name', discovery_callback)
yield from helpers.discovery.async_discover('service_name', 'hello')
yield from hass.async_block_till_done()
assert result == ['hello']
async def test_custom_component_name(hass):
"""Test the name attribte of custom components."""
comp = loader.get_component(hass, 'test_standalone')
assert comp.__name__ == 'custom_components.test_standalone'
assert comp.__package__ == 'custom_components'
comp = loader.get_component(hass, 'test_package')
assert comp.__name__ == 'custom_components.test_package'
assert comp.__package__ == 'custom_components.test_package'
comp = loader.get_component(hass, 'light.test')
assert comp.__name__ == 'custom_components.light.test'
assert comp.__package__ == 'custom_components.light'
# Test custom components is mounted
from custom_components.test_package import TEST
assert TEST == 5
async def test_log_warning_custom_component(hass, caplog):
"""Test that we log a warning when loading a custom component."""
loader.get_component(hass, 'test_standalone')
assert \
'You are using a custom component for test_standalone' in caplog.text
loader.get_component(hass, 'light.test')
assert 'You are using a custom component for light.test' in caplog.text
| {
"content_hash": "0e68e1368785980fd37ade2d04c9ddae",
"timestamp": "",
"source": "github",
"line_count": 136,
"max_line_length": 77,
"avg_line_length": 33.044117647058826,
"alnum_prop": 0.6688918558077437,
"repo_name": "persandstrom/home-assistant",
"id": "4beb7db570e4cd580bef790ef30fa11c6ce991bf",
"size": "4494",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_loader.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "1067"
},
{
"name": "Python",
"bytes": "11745210"
},
{
"name": "Ruby",
"bytes": "518"
},
{
"name": "Shell",
"bytes": "16652"
}
],
"symlink_target": ""
} |
"""merged scheduler and mmou code
Revision ID: 9dce37322c71
Revises: 960085fce39c, edc5377c32c7
Create Date: 2016-06-15 10:41:56.784841
"""
# revision identifiers, used by Alembic.
revision = '9dce37322c71'
down_revision = ('960085fce39c', 'edc5377c32c7')
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
def upgrade(engine_name):
globals()["upgrade_%s" % engine_name]()
def downgrade(engine_name):
globals()["downgrade_%s" % engine_name]()
def upgrade_development():
pass
def downgrade_development():
pass
def upgrade_test():
pass
def downgrade_test():
pass
def upgrade_production():
pass
def downgrade_production():
pass
| {
"content_hash": "6cf4ebf8f372058a1b0274337b506633",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 48,
"avg_line_length": 13.76923076923077,
"alnum_prop": 0.696927374301676,
"repo_name": "c4fcm/CivilServant",
"id": "7b342d0bddd92a2e90bf19702369a3cdbf63c33c",
"size": "716",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "alembic/versions/9dce37322c71_merged_scheduler_and_mmou_code.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Mako",
"bytes": "923"
},
{
"name": "Python",
"bytes": "209127"
}
],
"symlink_target": ""
} |
from ui import *
from state import *
from updater import *
from const import DEFAULT_CONFIG
from os import path as op
import sys
from optparse import OptionParser
import curses
import curses.ascii
import os
import stat
import yaml
import locale
import unicodedata
import threading
import Queue
import tweepy
__author__ = 'seikichi@kmc.gr.jp'
__version__ = '0.1'
def main():
sys.path.insert(0,
op.join(op.dirname(op.realpath(__file__)),
'lib'))
parser = OptionParser(version="version %s" % __version__)
parser.add_option('-c', '--config', dest='config',
help="configure file (default: ~/.tuitwirc.yml)")
parser.add_option('-i', '--initialize',
action="store_true", dest='init', default=False,
help="Initialize config file and OAuth.")
(options, args) = parser.parse_args()
if not options.config:
options.config = op.expanduser('~/.tuitwirc.yml')
if not op.exists(options.config) or options.init:
init_config()
TuiTwi(config=options.config).run()
def init_config():
    '''Perform OAuth authentication, obtain the access_token and save it to the config file.'''
    oauth_auth = tweepy.OAuthHandler(const.CONSUMER_KEY, const.CONSUMER_SECRET)
    # TODO(seikichi) handle errors here
print 'Please authorize tuitwi: %s' % oauth_auth.get_authorization_url()
verifier = raw_input('PIN: ').strip()
    # obtain the access_token
oauth_auth.get_access_token(verifier)
access_token = oauth_auth.access_token
key = access_token.key
secret = access_token.secret
    # load the default YAML config and set the access_token
data = DEFAULT_CONFIG
data['access_token']['key'] = key
data['access_token']['secret'] = secret
data['credential'] = dict(user=oauth_auth.get_username())
    # write to $HOME
rcfile = op.join(os.path.expanduser('~'), '.tuitwirc.yml')
f = open(rcfile, 'w')
yaml.dump(data, f, encoding='utf-8', allow_unicode=True, default_flow_style=False)
os.chmod(rcfile, stat.S_IREAD|stat.S_IWRITE)
f.close()
class TuiTwi(object):
def __init__(self, config):
os.chmod(config, stat.S_IREAD|stat.S_IWRITE)
self.conf = yaml.load(open(config).read().decode('utf8'))
self.event = threading.Event()
self.event.clear()
self.lock = threading.RLock()
self.queue = Queue.Queue()
def run(self):
locale.setlocale(locale.LC_CTYPE, "")
try:
curses.wrapper(self.loop)
except Exception, message:
curses.nocbreak()
curses.echo()
curses.endwin()
print message
self.event.set()
self.updater.join()
self.twitter_communicator.join()
def loop(self, stdscr):
        # color settings
if curses.has_colors():
curses.use_default_colors()
curses.start_color()
curses.init_pair(1, curses.COLOR_BLUE, -1)
curses.init_pair(2, curses.COLOR_CYAN, -1)
curses.init_pair(3, curses.COLOR_GREEN, -1)
curses.init_pair(4, curses.COLOR_MAGENTA, -1)
curses.init_pair(5, curses.COLOR_RED, -1)
curses.init_pair(6, curses.COLOR_WHITE, -1)
curses.init_pair(7, curses.COLOR_YELLOW, -1)
self.form = Form(stdscr, self.conf)
self.updater = Updater(self.queue, self.conf)
self.twitter_communicator = TwitterCommunicator(self.queue, self.form, self.lock, self.conf)
self.twitter_communicator.start()
self.updater.start()
self.state = ViewState(stdscr, self.form, self.queue, self.conf)
self.form.draw()
curses.doupdate()
stdscr.nodelay(False)
while self.state is not None:
ch = stdscr.getch()
self.lock.acquire()
self.state = self.state.execute(ch)
self.form.draw()
curses.doupdate()
self.lock.release()
| {
"content_hash": "2450667c39956130242c8e2c4599ab4b",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 100,
"avg_line_length": 31.416,
"alnum_prop": 0.6065699006875478,
"repo_name": "seikichi/tuitwi",
"id": "2bc2c160e24e3a65618ed6cf7397836ddbbff88c",
"size": "4084",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tuitwi/__init__.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "52234"
}
],
"symlink_target": ""
} |
from chatterbot import ChatBot
'''
This is an example showing how to create an export file from
an existing chat bot that can then be used to train other bots.
'''
chatbot = ChatBot(
'Export Example Bot',
trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
)
# First, lets train our bot with some data
chatbot.train('chatterbot.corpus.english')
# Now we can export the data to a file
chatbot.trainer.export_for_training('./myfile.json')
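# Illustrative follow-up (an assumption, not part of the original example): if
# the ChatterBotCorpusTrainer accepts a filesystem path to the exported corpus
# file, a second bot could be trained from it like this:
#
#     other_bot = ChatBot(
#         'Import Example Bot',
#         trainer='chatterbot.trainers.ChatterBotCorpusTrainer'
#     )
#     other_bot.train('./myfile.json')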
| {
"content_hash": "b9ad2e169e8efb838763d494dca0c1e0",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 63,
"avg_line_length": 26.529411764705884,
"alnum_prop": 0.753880266075388,
"repo_name": "osDanielLee/SelfThinkingRobot",
"id": "539a95d0b0041df826d7f66ea885e097a940ad7a",
"size": "451",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "AnalyzeData/examples/export_example.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "277962"
}
],
"symlink_target": ""
} |
from flask import render_template, redirect, request, url_for, flash
from flask_login import login_user, logout_user, login_required, \
current_user
from . import auth
from .. import db
from ..models import User
from ..email import send_email
from .forms import LoginForm, RegistrationForm, ChangePasswordForm,\
ForgetPasswordForm, PasswordResetForm, ChangeEmailForm
@auth.route('/login', methods=['GET', 'POST'])
def login():
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is not None and user.verify_password(form.password.data):
login_user(user, form.remember_me.data)
return redirect(request.args.get('next') or url_for('main.index'))
flash('Invalid username or password.')
return render_template('auth/login.html', form=form)
@auth.route('/logout')
@login_required
def logout():
logout_user()
flash('You have been logged out.')
return redirect(url_for('main.index'))
@auth.route('/register', methods=['GET', 'POST'])
def register():
form = RegistrationForm()
if form.validate_on_submit():
user = User(email=form.email.data,
username=form.username.data,
password=form.password.data)
db.session.add(user)
return redirect(url_for('auth.login'))
return render_template('auth/register.html', form=form)
@auth.route('/change-password', methods=['GET', 'POST'])
@login_required
def change_password():
form = ChangePasswordForm()
if form.validate_on_submit():
if current_user.verify_password(form.old_password.data):
current_user.password = form.password.data
db.session.add(current_user)
flash('Your password has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid password.')
return render_template('auth/change_password.html', form = form)
@auth.route('/reset', methods=['GET', 'POST'])
def password_reset_request():
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = ForgetPasswordForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
token = user.generate_reset_token()
send_email(user.email, 'Reset Your Password',
'auth/email/reset_password',
user=user, token=token,
next=request.args.get('next'))
flash('An email with instructions to reset your password has been '
'sent to you.')
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/reset/<token>', methods=['GET', 'POST'])
def password_reset(token):
if not current_user.is_anonymous:
return redirect(url_for('main.index'))
form = PasswordResetForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user is None:
return redirect(url_for('main.index'))
        if user.reset_password(token, form.password.data):
flash('Your password has been updated.')
return redirect(url_for('auth.login'))
else:
return redirect(url_for('main.index'))
return render_template('auth/reset_password.html', form=form)
@auth.route('/change-email', methods=['GET', 'POST'])
@login_required
def change_email_request():
form = ChangeEmailForm()
if form.validate_on_submit():
if current_user.verify_password(form.password.data):
current_user.email = form.email.data
db.session.add(current_user)
flash('Your email address has been updated.')
return redirect(url_for('main.index'))
else:
flash('Invalid email or password.')
return render_template('auth/change_email.html', form=form) | {
"content_hash": "d28c61d9d02cc8c52c77864180052d09",
"timestamp": "",
"source": "github",
"line_count": 106,
"max_line_length": 78,
"avg_line_length": 37.68867924528302,
"alnum_prop": 0.6357947434292867,
"repo_name": "superchilli/webapp",
"id": "7c9baafdca7885e2a5eb8c27e95c3e90fd7c884f",
"size": "3995",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/auth/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "5939"
},
{
"name": "CSS",
"bytes": "13325"
},
{
"name": "HTML",
"bytes": "31292"
},
{
"name": "JavaScript",
"bytes": "21983"
},
{
"name": "Mako",
"bytes": "9463"
},
{
"name": "Python",
"bytes": "12957225"
},
{
"name": "Shell",
"bytes": "3202"
}
],
"symlink_target": ""
} |
from tkinter import *
from tkinter.ttk import *
class Entry(Entry):
def __init__(self, parent):
self.parent = parent
        super().__init__(self.parent)  # initialize the wrapped ttk.Entry widget
self.__alignment = W
self.__column = 0
self.__padding = (0, 0)
self.__placed = False
self.__row = 0
def Disable(self):
self.configure(state=DISABLED)
def Enable(self):
self.configure(state=NORMAL)
def Get_Text(self):
return self.get()
def Move_Cursor(self, characterCount):
if characterCount != 0:
self.xview_scroll(characterCount, UNITS)
def Place(self, row, column):
self.__row = row
self.__column = column
self.__placed = True
self.grid(row=self.__row, column=self.__column, padx=self.__padding[0], pady=self.__padding[1], sticky=self.__alignment)
def Set_Alignment(self, alignment):
alignmentOptions = {'left': W, 'right': E, 'top': N, 'bottom': S}
alignment = alignment.lower()
if alignment in alignmentOptions.keys():
self.__alignment = alignmentOptions[alignment]
if self.__placed:
self.grid(sticky=self.__alignment)
def Set_Padding(self, x, y):
self.__padding = (x, y)
if self.__placed:
self.grid(padx=self.__padding[0], pady=self.__padding[1])
def Set_Text(self, text):
        self.delete(0, END)
self.insert(0, text)
self.Move_Cursor(len(text))
def Set_Width(self, width):
self.configure(width=width)
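# Illustrative usage sketch (not part of the original module; assumes a running
# Tk interpreter and grid geometry, which Place() uses internally):
#
#     root = Tk()
#     entry = Entry(root)
#     entry.Set_Width(30)
#     entry.Place(row=0, column=0)
#     entry.Set_Text("hello")
#     root.mainloop()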
| {
"content_hash": "a33ad64d2ea94ae35c6f11b4805c458b",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 128,
"avg_line_length": 24.26153846153846,
"alnum_prop": 0.5688015218769816,
"repo_name": "Kyle-Fagan/tkLibs",
"id": "41cc47f649f29a948a6c88399ab2d8c4b5d32c60",
"size": "1577",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tkLibs/entry.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "10068"
}
],
"symlink_target": ""
} |
class Compressor(object):
def compress(self, toCompress):
if toCompress is None:
return ""
else:
result = []
index = 0
            length = len(toCompress)
while index < length:
counter = 1
index += 1
while index < length and toCompress[index - 1] == toCompress[index]:
counter += 1
index += 1
result.append(str(counter))
result.append(toCompress[index - 1])
return ''.join(result)
import unittest
class StringCompressorTest(unittest.TestCase):
def setUp(self):
self.compressor = Compressor()
def test_nothing_is_compressed_to_an_empty_string(self):
self.assertEqual("", self.compressor.compress(None))
def test_compresses_a_single_character_string(self):
self.assertEqual("1a", self.compressor.compress("a"))
def test_compresses_a_string_of_unique_characters(self):
self.assertEqual("1a1b1c", self.compressor.compress("abc"))
def test_compresses_a_string_of_doubled_characters(self):
self.assertEqual("2a2b2c", self.compressor.compress("aabbcc"))
def test_compresses_an_empty_string(self):
self.assertEqual("", self.compressor.compress(""))
| {
"content_hash": "4c1d6a14d59b5c38407dde7a5284481e",
"timestamp": "",
"source": "github",
"line_count": 40,
"max_line_length": 84,
"avg_line_length": 33.1,
"alnum_prop": 0.5921450151057401,
"repo_name": "Alex-Diez/python-tdd-katas",
"id": "d603b368b4e0bd8b8a892a7439399e2c5f153172",
"size": "1350",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "old-katas/string-compress-kata/day-6.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335247"
}
],
"symlink_target": ""
} |
from game import entity
class OnlineStub(entity.StubEntity):
_components = (
'stubs.Online.Online',
)
_stub_name = 'online'
def __init__(self):
print('OnlineStub create!')
| {
"content_hash": "91dec71df7308c10f08e87a69f664c48",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 36,
"avg_line_length": 16.636363636363637,
"alnum_prop": 0.6775956284153005,
"repo_name": "dennisding/ether",
"id": "443a8bfebbcf936ae759de260f1a58d377523ab9",
"size": "207",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "entities/OnlineStub.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "75"
},
{
"name": "Python",
"bytes": "75989"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function, unicode_literals
import hashlib
import json
import os
from os.path import abspath, basename, dirname, isdir, isfile, islink, join
import re
import tarfile
import tempfile
from .._vendor.auxlib.entity import EntityEncoder
from ..base.constants import CONDA_PACKAGE_EXTENSION_V1
from ..base.context import context
from ..common.compat import PY3
from ..common.path import paths_equal
from ..core.prefix_data import PrefixData
from ..gateways.disk.delete import rmtree
from ..install import PREFIX_PLACEHOLDER
from ..misc import untracked
def remove(prefix, files):
"""
Remove files for a given prefix.
"""
dst_dirs = set()
for f in files:
dst = join(prefix, f)
dst_dirs.add(dirname(dst))
os.unlink(dst)
for path in sorted(dst_dirs, key=len, reverse=True):
try:
os.rmdir(path)
except OSError: # directory might not be empty
pass
def execute(args, parser):
prefix = context.target_prefix
if args.which:
for path in args.which:
for prec in which_package(path):
print('%-50s %s' % (path, prec.dist_str()))
return
print('# prefix:', prefix)
if args.reset:
remove(prefix, untracked(prefix))
return
if args.untracked:
files = sorted(untracked(prefix))
print('# untracked files: %d' % len(files))
for fn in files:
print(fn)
return
make_tarbz2(prefix,
name=args.pkg_name.lower(),
version=args.pkg_version,
build_number=int(args.pkg_build))
def get_installed_version(prefix, name):
for info in PrefixData(prefix).iter_records():
if info['name'] == name:
return str(info['version'])
return None
def create_info(name, version, build_number, requires_py):
d = dict(
name=name,
version=version,
platform=context.platform,
arch=context.arch_name,
build_number=int(build_number),
build=str(build_number),
depends=[],
)
if requires_py:
d['build'] = ('py%d%d_' % requires_py) + d['build']
d['depends'].append('python %d.%d*' % requires_py)
return d
shebang_pat = re.compile(r'^#!.+$', re.M)
def fix_shebang(tmp_dir, path):
    if open(path, 'rb').read(2) != b'#!':  # file is opened in binary mode, so compare against bytes
return False
with open(path) as fi:
data = fi.read()
m = shebang_pat.match(data)
if not (m and 'python' in m.group()):
return False
data = shebang_pat.sub('#!%s/bin/python' % PREFIX_PLACEHOLDER,
data, count=1)
tmp_path = join(tmp_dir, basename(path))
with open(tmp_path, 'w') as fo:
fo.write(data)
os.chmod(tmp_path, int('755', 8))
return True
def _add_info_dir(t, tmp_dir, files, has_prefix, info):
info_dir = join(tmp_dir, 'info')
os.mkdir(info_dir)
with open(join(info_dir, 'files'), 'w') as fo:
for f in files:
fo.write(f + '\n')
with open(join(info_dir, 'index.json'), 'w') as fo:
json.dump(info, fo, indent=2, sort_keys=True, cls=EntityEncoder)
if has_prefix:
with open(join(info_dir, 'has_prefix'), 'w') as fo:
for f in has_prefix:
fo.write(f + '\n')
for fn in os.listdir(info_dir):
t.add(join(info_dir, fn), 'info/' + fn)
def create_conda_pkg(prefix, files, info, tar_path, update_info=None):
"""
create a conda package with `files` (in `prefix` and `info` metadata)
at `tar_path`, and return a list of warning strings
"""
files = sorted(files)
warnings = []
has_prefix = []
tmp_dir = tempfile.mkdtemp()
t = tarfile.open(tar_path, 'w:bz2')
h = hashlib.new('sha1')
for f in files:
assert not (f.startswith('/') or f.endswith('/') or '\\' in f or f == ''), f
path = join(prefix, f)
if f.startswith('bin/') and fix_shebang(tmp_dir, path):
path = join(tmp_dir, basename(path))
has_prefix.append(f)
t.add(path, f)
h.update(f.encode('utf-8'))
h.update(b'\x00')
if islink(path):
link = os.readlink(path)
if PY3 and isinstance(link, str):
h.update(bytes(link, 'utf-8'))
else:
h.update(link)
if link.startswith('/'):
warnings.append('found symlink to absolute path: %s -> %s' %
(f, link))
elif isfile(path):
h.update(open(path, 'rb').read())
if path.endswith('.egg-link'):
warnings.append('found egg link: %s' % f)
info['file_hash'] = h.hexdigest()
if update_info:
update_info(info)
_add_info_dir(t, tmp_dir, files, has_prefix, info)
t.close()
rmtree(tmp_dir)
return warnings
def make_tarbz2(prefix, name='unknown', version='0.0', build_number=0,
files=None):
if files is None:
files = untracked(prefix)
print("# files: %d" % len(files))
if len(files) == 0:
print("# failed: nothing to do")
return None
if any('/site-packages/' in f for f in files):
python_version = get_installed_version(prefix, 'python')
assert python_version is not None
requires_py = tuple(int(x) for x in python_version[:3].split('.'))
else:
requires_py = False
info = create_info(name, version, build_number, requires_py)
tarbz2_fn = ('%(name)s-%(version)s-%(build)s' % info) + CONDA_PACKAGE_EXTENSION_V1
create_conda_pkg(prefix, files, info, tarbz2_fn)
print('# success')
print(tarbz2_fn)
return tarbz2_fn
def which_package(path):
"""
given the path (of a (presumably) conda installed file) iterate over
the conda packages the file came from. Usually the iteration yields
only one package.
"""
path = abspath(path)
prefix = which_prefix(path)
if prefix is None:
from ..exceptions import CondaVerificationError
raise CondaVerificationError("could not determine conda prefix from: %s" % path)
for prec in PrefixData(prefix).iter_records():
if any(paths_equal(join(prefix, f), path) for f in prec['files'] or ()):
yield prec
def which_prefix(path):
"""
given the path (to a (presumably) conda installed file) return the
    environment prefix in which the file is located
"""
prefix = abspath(path)
while True:
if isdir(join(prefix, 'conda-meta')):
            # we found it, so let's return it
return prefix
if prefix == dirname(prefix):
# we cannot chop off any more directories, so we didn't find it
return None
prefix = dirname(prefix)
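# Illustrative usage sketch (not part of the conda CLI; the path below is a
# hypothetical example): which_package() yields the package record(s) that own
# a conda-installed file, mirroring the args.which handling in execute() above:
#
#     for prec in which_package('/opt/conda/bin/python'):
#         print(prec.dist_str())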
| {
"content_hash": "5687834bcd7712105262cb4444d0af0f",
"timestamp": "",
"source": "github",
"line_count": 227,
"max_line_length": 88,
"avg_line_length": 30.09691629955947,
"alnum_prop": 0.5837236533957846,
"repo_name": "zooba/PTVS",
"id": "629f9dd37f7544cfc90bdd27ede9bd7b871cec15",
"size": "6931",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/site-packages/conda/cli/main_package.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12390821"
},
{
"name": "C++",
"bytes": "209386"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "25220"
},
{
"name": "Python",
"bytes": "888412"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
} |
from tastypie.resources import ModelResource, ALL
from .models import Filer, Filing
from .utils.serializer import CIRCustomSerializer
class FilerResource(ModelResource):
class Meta:
queryset = Filer.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filer_id_raw': ALL}
excludes = ['id']
class FilingResource(ModelResource):
class Meta:
queryset = Filing.objects.all()
serializer = CIRCustomSerializer()
filtering = {'filing_id_raw': ALL}
excludes = ['id']
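# Illustrative wiring sketch (not part of this module; the `v1` API name and
# urls.py placement are assumptions): these resources are typically registered
# on a tastypie Api instance, e.g.
#
#     from tastypie.api import Api
#     v1_api = Api(api_name='v1')
#     v1_api.register(FilerResource())
#     v1_api.register(FilingResource())
#
# and the resulting v1_api.urls are then included from the project's urls.py.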
| {
"content_hash": "659b8eb63ead767085f95841357260b8",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 49,
"avg_line_length": 28.68421052631579,
"alnum_prop": 0.6697247706422018,
"repo_name": "myersjustinc/django-calaccess-campaign-browser",
"id": "685275ae8d17ab9e879561b847feb9ee7be925a6",
"size": "545",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "calaccess_campaign_browser/api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6025"
},
{
"name": "HTML",
"bytes": "55142"
},
{
"name": "Makefile",
"bytes": "1291"
},
{
"name": "Python",
"bytes": "164809"
}
],
"symlink_target": ""
} |
from testSPARQL import ns_rdf
from testSPARQL import ns_rdfs
from testSPARQL import ns_dc0
from testSPARQL import ns_foaf
from testSPARQL import ns_ns
from testSPARQL import ns_book
from testSPARQL import ns_vcard
from testSPARQL import ns_person
from rdflib.Literal import Literal
from rdflib.sparql.sparqlOperators import lt, ge
import datetime
from rdflib.sparql.graphPattern import GraphPattern
thresholdDate = datetime.date(2005,01,01)
rdfData = """<?xml version="1.0" encoding="UTF-8"?>
<rdf:RDF
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
xmlns:ns = "http://example.org/ns#"
>
<rdf:Description>
<foaf:name>Alice</foaf:name>
<foaf:mbox rdf:resource="mailto:alice@example.com"/>
</rdf:Description>
<rdf:Description>
<foaf:name>Bob</foaf:name>
<foaf:mbox rdf:resource="mailto:bob@example.org"/>
</rdf:Description>
</rdf:RDF>
"""
select = []
pattern = GraphPattern([("?x",ns_foaf["name"],"?name")])
optional = []
construct = None
tripleStore = None
| {
"content_hash": "b7d87eb60c1555c2d5b43a1fec1a80b2",
"timestamp": "",
"source": "github",
"line_count": 42,
"max_line_length": 68,
"avg_line_length": 29.38095238095238,
"alnum_prop": 0.6636952998379254,
"repo_name": "alcides/rdflib",
"id": "60723b7ea4f08e5c5353317254081390486dfe1d",
"size": "1361",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "test/test_sparql/sparql/ConstructTests/Test10_23.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "47529"
},
{
"name": "Python",
"bytes": "1477729"
}
],
"symlink_target": ""
} |
"""
Burp-UI is a web-ui for burp backup written in python with Flask and
jQuery/Bootstrap
.. module:: burpui_monitor
:platform: Unix
:synopsis: Burp-UI monitor module.
.. moduleauthor:: Ziirish <hi+burpui@ziirish.me>
"""
__title__ = "burp-ui-monitor"
| {
"content_hash": "94187a3bb7d60e8a760c3698977bb292",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 68,
"avg_line_length": 23.727272727272727,
"alnum_prop": 0.6934865900383141,
"repo_name": "ziirish/burp-ui",
"id": "3989dfa4f44096b4033277c7a406cf609c0dcb85",
"size": "284",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pkgs/burp-ui-monitor/burpui_monitor-decoy/__init__.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "7308"
},
{
"name": "Dockerfile",
"bytes": "7163"
},
{
"name": "HTML",
"bytes": "166600"
},
{
"name": "JavaScript",
"bytes": "176986"
},
{
"name": "Mako",
"bytes": "412"
},
{
"name": "Python",
"bytes": "1067898"
},
{
"name": "Shell",
"bytes": "39490"
}
],
"symlink_target": ""
} |
from __future__ import division #3/2=1.5 and 3//2=1
#adam-does# blends cosmics and blocks them from being blended into stars
#adam-use # use with CRNitschke cosmics masking pipeline
import sys ; sys.path.append('/u/ki/awright/InstallingSoftware/pythons')
from import_tools import *
import astropy
from astropy.io import ascii
from copy import deepcopy as cp
import os
import pymorph
import skimage
from skimage import measure
from skimage import morphology
import mahotas
from matplotlib.pyplot import *
import numpy
numpy.warnings.filterwarnings('ignore') #adam-tmp#
warnings.simplefilter("ignore", DeprecationWarning)
from numpy import histogram
import time
ns=globals()
conn8=array([[1,1,1],[1,1,1],[1,1,1]])
conn4=array([[0,1,0],[1,1,1],[0,1,0]])
connS=array([[0,1,1,0],[1,1,1,1],[1,1,1,1],[0,1,1,0]],dtype=bool)
plotdir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/plot_SCIENCE_compare/'
#START: RING FUNCTIONS define commands for fixing rings
slope_flat_cut=.04
ring_rr_cut=2.8 #new
def track2ring(track_spots,ring_spots):
'''this is designed to extend tracks through objects they are near. it is called within `ringer`. this fits a line to `track_spots` and stretches the mask along the line through the ring'''
try:
#fill ring then get ring pairs
ring_spots_all=scipy.ndimage.binary_fill_holes(ring_spots)
ayy,axx=nonzero(ring_spots_all)
#get track pairs and fit line to track spots
oyy,oxx=nonzero(track_spots)
rr,poly,polytype=polyfitter(track_spots,1)
#get spots in filled ring that are co-linear with the track
try:
m,b=poly.coeffs #for line y=m*x+b or x=m*y+b
print "track2ring poly.coeffs runs fine"
except ValueError:
print "track2ring poly.coeffs ValueErr"
return track_spots,0,rr
except AttributeError:
print "track2ring poly.coeffs AttributeErr"
return track_spots,0,rr
if rr>ring_rr_cut or isnan(rr):
return track_spots,0,rr
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
oX=poly(oyy)
oOffsets=(oxx-oX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
oY=poly(oxx)
oOffsets=(oyy-oY).__abs__()
else:
return track_spots,0,rr
extend_track_spots=aOffsets<1.3
xmin=oxx.min();xmax=oxx.max();ymin=oyy.min();ymax=oyy.max()
#make sure they are extending along the main axis of the track
ur=(axx>=xmax)*(ayy>=ymax)
ul=(axx<=xmin)*(ayy>=ymax)
lr=(axx>=xmax)*(ayy<=ymin)
ll=(axx<=xmin)*(ayy<=ymin)
if math.fabs(m)<slope_flat_cut:
if polytype=='x_of_y':
Rxxyy_spots=extend_track_spots*(ayy>=ymax) #upper
Lxxyy_spots=extend_track_spots*(ayy<=ymin) #lower
elif polytype=='y_of_x':
Rxxyy_spots=extend_track_spots*(axx>=xmax) #right
Lxxyy_spots=extend_track_spots*(axx<=xmin) #left
elif math.fabs(m)>slope_flat_cut**(-1):
if polytype=='x_of_y':
Rxxyy_spots=extend_track_spots*(axx>=xmax) #right
Lxxyy_spots=extend_track_spots*(axx<=xmin) #left
elif polytype=='y_of_x':
Rxxyy_spots=extend_track_spots*(ayy>=ymax) #upper
Lxxyy_spots=extend_track_spots*(ayy<=ymin) #lower
elif m>0:
Rxxyy_spots=extend_track_spots*ur
Lxxyy_spots=extend_track_spots*ll
elif m<0:
Rxxyy_spots=extend_track_spots*lr
Lxxyy_spots=extend_track_spots*ul
Rxx,Ryy=axx[Rxxyy_spots],ayy[Rxxyy_spots]
Lxx,Lyy=axx[Lxxyy_spots],ayy[Lxxyy_spots]
#now change the final mask if the edgepoints are above the threshold
track_spots_final=track_spots.copy()
Rpts=zip(Ryy,Rxx)
Lpts=zip(Lyy,Lxx)
included=0
for o in Rpts+Lpts:
included+=1
track_spots_final[o]=True
# now include a co-linear connector (intra-track/inter-track connector)
track_spots_final=connector(track_spots_final)
return track_spots_final,included,rr
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def ringable(ra_object):
'''this takes an "almost ring" and makes it a true ring. it is called within `ringer`.'''
ra_spots=asarray(ra_object.copy(),dtype=bool)
ra_insides=scipy.ndimage.binary_fill_holes(ra_spots)* numpy.logical_not(ra_spots)
hom=zeros(ra_spots.shape)
for corner in [(0,0),(0,-1),(-1,0),(-1,-1)]:
miss=zeros((3,3),dtype=bool)
miss[corner]=1
hit=scipy.ndimage.morphology.binary_dilation(miss,conn4)*numpy.logical_not(miss)
hom+=scipy.ndimage.morphology.binary_hit_or_miss(ra_spots, structure1=hit, structure2=miss)
hom=asarray(hom,dtype=bool)
fill_them=ra_insides*hom
ra_spots[fill_them]=1
#new to accomodate count_hole_filled_pixels
ra_skel=pymorph.thin(ra_spots)
ra_ring=pymorph.thin(ra_skel,pymorph.endpoints())
if not ra_ring.any(): #fill in the tiny gaps that ruin the ring!
ra4_spots=scipy.ndimage.binary_dilation(ra_spots,conn4)
ra4_skel=pymorph.thin(ra4_spots)
ra4_ring=pymorph.thin(ra4_skel,pymorph.endpoints())
if ra4_ring.any(): #fill in the tiny gaps that ruin the ring!
print "ringable 4\n"
ra_insides=scipy.ndimage.binary_fill_holes(ra4_ring)
fill_them=ra_insides*ra4_spots
ra_spots[fill_them]=1
return ra_spots
ra8_spots=scipy.ndimage.binary_dilation(ra_spots,conn8)
ra8_skel=pymorph.thin(ra8_spots)
ra8_ring=pymorph.thin(ra8_skel,pymorph.endpoints())
if ra8_ring.any(): #fill in the tiny gaps that ruin the ring!
print "ringable 8\n"
ra_insides=scipy.ndimage.binary_fill_holes(ra8_ring)
fill_them=ra_insides*ra8_spots
ra_spots[fill_them]=1
return ra_spots
def ringer_noplot(spots_ringer,l_ringer,filtstamp_ringer,imstamp_ringer,seg0stamp_ringer,star_stamp):
'''input the detection stamp with a ring in it and output the detection stamp if you remove the ring and extend the outside tracks through the ring'''
try:
fl_label_str='file=%s label=%.4i' % (OFB,l_ringer)
#DONT CONTINUE: if saturation spike
sl2_height,sl2_width=imstamp_ringer.shape
sl2_height,sl2_width=float(sl2_height-6),float(sl2_width-6)
if sl2_height>230 and (sl2_height/sl2_width)>25:
return spots_ringer, "saturation spike"
#DONT CONTINUE: if really long and skinny ring
inside4_b4=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).any()
#START: what was `getring_track(spots_ringer)`
#input object mask and output the pixels separated into a ring pixels and track pixels
ringer_skel=pymorph.thin(spots_ringer)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any(): #fill in the tiny gaps that ruin the ring!
spots_ringer2=ringable(spots_ringer)
ringer_skel=pymorph.thin(spots_ringer2)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any():
print (fl_label_str+": RINGABLE didnt work!\n")
return spots_ringer, "Un-ringable holes"
else:
spots_ringer=spots_ringer2
#DONT CONTINUE: if really long and skinny ring
inside4_after=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).sum()
if not inside4_b4 and not inside4_after>5:
return spots_ringer, "none in square pattern" #might as well put this at beginning, if it fails (and I want it to pass) it'll probably pass after the thresh is raised
#now if there are gaps in the ring, then take only the inner portion surrounding them
insides=scipy.ndimage.binary_fill_holes(ring)* logical_not(ring)
newinsides=skimage.morphology.remove_small_objects(insides,2,connectivity=1) #conn4
if (insides!=newinsides).any():
newinsides_seg,Nnewinsides_segs= scipy.ndimage.label(newinsides,conn8)
if Nnewinsides_segs<=1:
ring2=scipy.ndimage.binary_dilation(newinsides,conn8,mask=ring)*logical_not(newinsides)
ring=ring2
insides=newinsides
#skel_outside_ring=ringer_skel*logical_not(scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(ring,conn4)))
ring_and_insides=insides+ring
outsides=logical_not(ring_and_insides)
skel_outside_ring=ringer_skel*outsides
ringer_track_portions=skimage.morphology.remove_small_objects(skel_outside_ring,3,connectivity=2) #conn8
ringer_track_spots=spots_ringer*scipy.ndimage.binary_dilation(ringer_track_portions,conn8,mask=outsides)
Rgetring_ring,Rgetring_track=asarray(ring,dtype=bool),asarray(ringer_track_spots,dtype=bool)
#END: end of what was previously getring_track
#DONT CONTINUE: if it's a circle of cosmics
#tree_ring=ring.copy()
ring_and_outer_layer=scipy.ndimage.binary_dilation(ring,conn4,mask=outsides)
image_ring,image_ring_widen=imstamp_ringer[ring],imstamp_ringer[ring_and_outer_layer]
image_ring.sort();image_ring_widen.sort()
image_ring,image_ring_widen=image_ring[:-3],image_ring_widen[:-3]
image_ring_mean=max(image_ring.mean(),image_ring_widen.mean())
image_ring_filled_mean=(imstamp_ringer[insides].mean())
if image_ring_mean>image_ring_filled_mean: #if the mean value of the edge is greater than the middle, then it isn't an object at all
print (fl_label_str+": circle of cosmics!\n")
return spots_ringer, "Circle of Cosmics"
#get original mask
ringer_mask0=seg0stamp_ringer>0
ringer0=ringer_mask0*spots_ringer
yy0,xx0=nonzero(ringer0)
Pts0=zip(yy0,xx0)
for pt0 in Pts0:
if not Rgetring_track[pt0]:
if skel_outside_ring[pt0]:
skel_outside_seg,Nskelsegs=scipy.ndimage.label(skel_outside_ring,conn8)
pt0_l=skel_outside_seg[pt0]
pt0_spots=skel_outside_seg==pt0_l
Rgetring_track[pt0_spots]=True
else:
Rgetring_track[pt0]=True
if not Rgetring_track.any():#Now if it was all ring
#reset to the original mask
return spots_ringer, "Entire thing was a ring"
#SIMPLE LINE: BEGIN try seeing if everything fits in a simple line really easily
max_within=scipy.stats.scoreatpercentile(filtstamp_ringer[ring_and_insides],95)
cosmics_lintry=(filtstamp_ringer>max_within*2)*spots_ringer
yy_lin,xx_lin=nonzero(cosmics_lintry)
try:
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if cosmics_lintry.sum()>4 and track_length>7:
track_spots_final,included,rr=track2ring(cosmics_lintry,Rgetring_ring)
if (rr<.75) or (cosmics_lintry.sum()>9 and rr<1.03):
print (fl_label_str+": SIMPLE LINE!\n")
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,rr_per_step=.25)
#now include tracks that overlap with the mask
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
track_seg_include=ring_seg[cosmics_lintry]
track_seg_include_labels=unique(track_seg_include).tolist()
try:track_seg_include_labels.remove(0)
except ValueError:pass
if track_seg_include_labels:
spots_yy_all,spots_xx_all=array([],dtype=int),array([],dtype=int)
for l_track in track_seg_include_labels:
spots=ring_seg==l_track
track_spots_final+=spots
spots_yy,spots_xx=nonzero(spots)
spots_yy_all=append(spots_yy_all,spots_yy)
spots_xx_all=append(spots_xx_all,spots_xx)
ringer_yy,ringer_xx=nonzero(track_spots_final)
return track_spots_final, 0 #ringstat==0 implies all is well with ringer
except ValueError:
if cosmics_lintry.any(): print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
else: pass
#SIMPLE LINE: END try seeing if everything fits in a simple line really easily
# first doing ring segments with 1 layer outside the ring excluded (gets closer to the ring), then doing it with 2 layers excluded (has advantage of not mixing detections near the ring). then if the 1layer and 2layer thing disagree I ta
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
ringer_track_labels=range(1,1+Nring_track_labels)
ring_slices=scipy.ndimage.find_objects(ring_seg)
ring_and_outer_layers2=scipy.ndimage.binary_dilation(ring_and_outer_layer,conn8,mask=outsides)
ring_seg_layers2,Nring_track_labels_layers2=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layers2),conn8)
#if there are a ton of track pieces, then I'll go with the original mask thing
ringer_track_labels_loop=copy(ringer_track_labels)
xx_seg,yy_seg=array([]),array([])
for l_bit in ringer_track_labels_loop:
sl=ring_slices[l_bit-1]
track_spots=ring_seg[sl]==l_bit
ringer0_here=ringer0[sl][track_spots].any()
if track_spots.sum()<7:
continue
layers2_stamp=ring_seg_layers2[sl]
layers2_at_track=layers2_stamp[track_spots]
layers2_at_track_labels=unique(layers2_at_track).tolist()
try:layers2_at_track_labels.remove(0)
except ValueError:pass
Nl2s_possible=len(layers2_at_track_labels)
if Nl2s_possible>1:
l2_sizes=[]
l2_in_orig=[]
for l2_l in layers2_at_track_labels:
l2_spots=layers2_stamp==l2_l
l2_in_orig.append(ringer0[sl][l2_spots].any())
l2_sizes.append(l2_spots.sum())
l2_sizes=array(l2_sizes)
l2_size_cut=l2_sizes>2
Nl2s=sum(l2_size_cut)
if Nl2s>=2:
if ringer0_here and not array(l2_in_orig).any(): continue
ringer_track_labels_add=max(ringer_track_labels)+1+arange(Nl2s_possible)
ringer_track_labels=ringer_track_labels+ringer_track_labels_add.tolist()
ring_seg[sl][track_spots]=0
ringer_track_labels.remove(l_bit)
for l2_l,ring_seg_l in zip(layers2_at_track_labels,ringer_track_labels_add):
l2_spots=layers2_stamp==l2_l
l2yy,l2xx=nonzero(l2_spots)
xx_seg=append(xx_seg,l2xx)
yy_seg=append(yy_seg,l2yy)
ring_seg[sl][l2_spots]=ring_seg_l
print (fl_label_str+": thing with 1layer and 2layer masks actually matters!\n")
ringer_track_labels=asarray(ringer_track_labels)
ring_seg_in_orig=[]
ring_seg_maxvals=[]
ring_seg_areas=[]
for l_bit in ringer_track_labels:
track_spots=ring_seg==l_bit
ring_seg_in_orig.append(ringer0[track_spots].any())
ring_seg_maxvals.append(filtstamp_ringer[track_spots].max())
ring_seg_areas.append(track_spots.sum())
ring_seg_in_orig=asarray(ring_seg_in_orig)
ring_seg_maxvals=asarray(ring_seg_maxvals)
ring_seg_areas=asarray(ring_seg_areas)
        #keep any track piece whose peak exceeds twice max_within (the 95th-percentile filtered value over the ring and its interior) or which contains an originally masked pixel
ring_seg_keep=(ring_seg_maxvals>max_within*2) + ring_seg_in_orig
if ring_seg_keep.sum()>0:ringer_track_labels=ringer_track_labels[ring_seg_keep]
else:
print (fl_label_str+': if none are the originals, then take the largest and the brightest\n')
try:
max_label=ringer_track_labels[ring_seg_maxvals.argmax()]
area_label=ringer_track_labels[ring_seg_areas.argmax()]
ringer_track_labels=[max_label]
if area_label!=max_label and ring_seg_areas.max()>5: ringer_track_labels.append(area_label)
except ValueError:
return spots_ringer, "Un-ringable holes"#if there is no max valued/max area thing, then they're all super small and
newring=ringer0.copy() #at the very least, use the original track pixels
Nringworms=0
for bit_i,l_bit in enumerate(ringer_track_labels):
track_spots=ring_seg==l_bit
track_spots_final,included,rr=track2ring(track_spots,Rgetring_ring)
#now extend track?!
if not isnan(rr):track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,name_extras=('ring_rr%.2f' % (rr,)).replace('.','pt'),rr_per_step=.2)
else:track_spots_final=scipy.ndimage.binary_dilation(track_spots_final,conn8)
newring+=track_spots_final
ringer_yy,ringer_xx=nonzero(track_spots_final)
try:
if rr>ring_rr_cut or isnan(rr):
Nringworms+=1
except IndexError: pass
#if there are 2 worms, then mask entire thing!
if Nringworms>1:
newring+=ring_and_insides
ringer_Fyy,ringer_Fxx=nonzero(newring)
return newring, 0 #ringstat==0 implies all is well with ringer
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
#RINGER
def ringer(spots_ringer,l_ringer,filtstamp_ringer,imstamp_ringer,seg0stamp_ringer,star_stamp):
'''input the detection stamp with a ring in it and output the detection stamp if you remove the ring and extend the outside tracks through the ring'''
try:
pltstr='pltRevise%s_holes_ringer-label%.4i' % (OFB,l_ringer)
pltextras=''
fl_label_str='file=%s label=%.4i' % (OFB,l_ringer)
#DONT CONTINUE: if saturation spike
sl2_height,sl2_width=imstamp_ringer.shape
sl2_height,sl2_width=float(sl2_height-6),float(sl2_width-6)
if sl2_height>230 and (sl2_height/sl2_width)>25:
return spots_ringer, "saturation spike"
#DONT CONTINUE: if really long and skinny ring
inside4_b4=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).any()
#START: what was `getring_track(spots_ringer)`
#input object mask and output the pixels separated into a ring pixels and track pixels
ringer_skel=pymorph.thin(spots_ringer)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any(): #fill in the tiny gaps that ruin the ring!
spots_ringer2=ringable(spots_ringer)
ringer_skel=pymorph.thin(spots_ringer2)
ring=pymorph.thin(ringer_skel,pymorph.endpoints())
if not ring.any():
print (fl_label_str+": RINGABLE didnt work!\n")
f=figure(figsize=(20,10))
yy,xx=nonzero(spots_ringer)
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
title('Holes there, but not ringable')
pltextras+='-NoChange_UnRingable'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Un-ringable holes"
else:
spots_ringer=spots_ringer2
#DONT CONTINUE: if really long and skinny ring
inside4_after=scipy.ndimage.binary_opening(scipy.ndimage.binary_fill_holes(spots_ringer)* logical_not(spots_ringer),array([[1,1],[1,1]],dtype=bool)).sum()
if not inside4_b4 and not inside4_after>5:
return spots_ringer, "none in square pattern" #might as well put this at beginning, if it fails (and I want it to pass) it'll probably pass after the thresh is raised
#now if there are gaps in the ring, then take only the inner portion surrounding them
insides=scipy.ndimage.binary_fill_holes(ring)* logical_not(ring)
newinsides=skimage.morphology.remove_small_objects(insides,2,connectivity=1) #conn4
if (insides!=newinsides).any():
newinsides_seg,Nnewinsides_segs= scipy.ndimage.label(newinsides,conn8)
if Nnewinsides_segs<=1:
ring2=scipy.ndimage.binary_dilation(newinsides,conn8,mask=ring) * logical_not(newinsides)
f=figure()
ax=f.add_subplot(2,2,1);imshow(ring,interpolation='nearest',origin='lower left');title('ring')
ax=f.add_subplot(2,2,2);imshow(insides,interpolation='nearest',origin='lower left');title('insides')
ax=f.add_subplot(2,2,3);imshow(newinsides,interpolation='nearest',origin='lower left');title('newinsides')
ax=f.add_subplot(2,2,4);imshow(ring2,interpolation='nearest',origin='lower left');title('ring2')
pltextras+='-reringing'
f.suptitle(fl_label_str+pltextras+'NewRing')
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
ring=ring2
insides=newinsides
#skel_outside_ring=ringer_skel*logical_not(scipy.ndimage.binary_fill_holes(scipy.ndimage.binary_dilation(ring,conn4)))
ring_and_insides=insides+ring
outsides=logical_not(ring_and_insides)
skel_outside_ring=ringer_skel*outsides
ringer_track_portions=skimage.morphology.remove_small_objects(skel_outside_ring,3,connectivity=2) #conn8
ringer_track_spots=spots_ringer*scipy.ndimage.binary_dilation(ringer_track_portions,conn8,mask=outsides)
Rgetring_ring,Rgetring_track=asarray(ring,dtype=bool),asarray(ringer_track_spots,dtype=bool)
#END: end of what was previously getring_track
#DONT CONTINUE: if it's a circle of cosmics
#tree_ring=ring.copy()
ring_and_outer_layer=scipy.ndimage.binary_dilation(ring,conn4,mask=outsides)
image_ring,image_ring_widen=imstamp_ringer[ring],imstamp_ringer[ring_and_outer_layer]
image_ring.sort();image_ring_widen.sort()
image_ring,image_ring_widen=image_ring[:-3],image_ring_widen[:-3]
image_ring_mean=max(image_ring.mean(),image_ring_widen.mean())
image_ring_filled_mean=(imstamp_ringer[insides].mean())
if image_ring_mean>image_ring_filled_mean: #if the mean value of the edge is greater than the middle, then it isn't an object at all
print (fl_label_str+": circle of cosmics!\n")
f=figure(figsize=(20,10))
yy,xx=nonzero(spots_ringer)
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
title('circle of cosmics')
pltextras+='-NoChange_CircleOfCosmics'
f.suptitle('file=%s label=%.4i image_ring_mean=%.4f>image_ring_filled_mean=%.4f' % (OFB,l_ringer,image_ring_mean,image_ring_filled_mean) + pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Circle of Cosmics"
#get original mask
ringer_mask0=seg0stamp_ringer>0
ringer0=ringer_mask0*spots_ringer
yy0,xx0=nonzero(ringer0)
Pts0=zip(yy0,xx0)
for pt0 in Pts0:
if not Rgetring_track[pt0]:
if skel_outside_ring[pt0]:
skel_outside_seg,Nskelsegs=scipy.ndimage.label(skel_outside_ring,conn8)
pt0_l=skel_outside_seg[pt0]
pt0_spots=skel_outside_seg==pt0_l
Rgetring_track[pt0_spots]=True
else:
Rgetring_track[pt0]=True
f=figure(figsize=(20,10))
f.subplots_adjust(left=.03, bottom=.03, right=.97, top=.93);f.suptitle(fl,size=8)
if not Rgetring_track.any():#Now if it was all ring
#reset to the original mask
ax=f.add_subplot(111)
yy,xx=nonzero(spots_ringer)
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(xx,yy,edgecolors='k',facecolors='None')
scatter(xx0,yy0,edgecolors='w',marker='x')
ax.set_title('No track found around the ring. Un-doing the blend so the original mask (the white "x"s) will be used!')
pltextras+='-NoChange_NoTrack'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return spots_ringer, "Entire thing was a ring"
#SIMPLE LINE: BEGIN try seeing if everything fits in a simple line really easily
max_within=scipy.stats.scoreatpercentile(filtstamp_ringer[ring_and_insides],95)
cosmics_lintry=(filtstamp_ringer>max_within*2)*spots_ringer
yy_lin,xx_lin=nonzero(cosmics_lintry)
try:
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if cosmics_lintry.sum()>4 and track_length>7:
track_spots_final,included,rr=track2ring(cosmics_lintry,Rgetring_ring)
if (rr<.75) or (cosmics_lintry.sum()>9 and rr<1.03):
print (fl_label_str+": SIMPLE LINE!\n")
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,rr_per_step=.25)
#now include tracks that overlap with the mask
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
track_seg_include=ring_seg[cosmics_lintry]
track_seg_include_labels=unique(track_seg_include).tolist()
try:track_seg_include_labels.remove(0)
except ValueError:pass
if track_seg_include_labels:
spots_yy_all,spots_xx_all=array([],dtype=int),array([],dtype=int)
for l_track in track_seg_include_labels:
spots=ring_seg==l_track
track_spots_final+=spots
spots_yy,spots_xx=nonzero(spots)
spots_yy_all=append(spots_yy_all,spots_yy)
spots_xx_all=append(spots_xx_all,spots_xx)
ringer_yy,ringer_xx=nonzero(track_spots_final)
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_xx,ringer_yy,marker='o',edgecolors='k',facecolors='None',s=50)
scatter(xx_lin,yy_lin,marker='x',edgecolors='w',facecolors='None')
pltextras+='-simple_line_interupt'
try:
scatter(spots_xx_all,spots_yy_all,marker='s',edgecolors='purple',facecolors='None',s=50)
f.suptitle('SIMPLE LINE: file=%s label=%.4i rr=%.4f' % (OFB,l_ringer,rr) +pltextras+'\nwhite "x"=spots that formed simple line, black "o"=final mask, purple \t=overlapping tracks included')
except:
f.suptitle('SIMPLE LINE: file=%s label=%.4i rr=%.4f' % (OFB,l_ringer,rr) +pltextras+'\nwhite "x"=spots that formed simple line, black "o"=final mask')
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return track_spots_final, 0 #ringstat==0 implies all is well with ringer
except ValueError:
if cosmics_lintry.any(): print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
else: pass
#SIMPLE LINE: END try seeing if everything fits in a simple line really easily
ax=f.add_subplot(2,6,1);ax.set_title('spots_ringer="o"\n& original mask ="x"');yy,xx=nonzero(spots_ringer);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
scatter(xx0,yy0,edgecolors='w',marker='x')
ax=f.add_subplot(2,6,2);ax.set_title('ringer_skel');yy,xx=nonzero(ringer_skel);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,3);ax.set_title('Rgetring_ring&ring');yy,xx=nonzero(ring);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,4);ax.set_title('skel_outside_ring');yy,xx=nonzero(skel_outside_ring);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,5);ax.set_title('ringer_track_portions');yy,xx=nonzero(ringer_track_portions);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
ax=f.add_subplot(2,6,6);ax.set_title('Rgetring_track\n& ringer_track_spots');yy,xx=nonzero(ringer_track_spots);imshow(filtstamp_ringer,interpolation='nearest',origin='lower left');scatter(xx,yy,edgecolors='k',facecolors='None')
# first doing ring segments with 1 layer outside the ring excluded (gets closer to the ring), then doing it with 2 layers excluded (has advantage of not mixing detections near the ring). then if the 1layer and 2layer thing disagree I take the 2layer results (as long as they fit certain criteria)
ring_seg,Nring_track_labels=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layer),conn8)
ringer_track_labels=range(1,1+Nring_track_labels)
ring_slices=scipy.ndimage.find_objects(ring_seg)
ring_and_outer_layers2=scipy.ndimage.binary_dilation(ring_and_outer_layer,conn8,mask=outsides)
ring_seg_layers2,Nring_track_labels_layers2=scipy.ndimage.label(Rgetring_track*logical_not(ring_and_outer_layers2),conn8)
#if there are a ton of track pieces, then I'll go with the original mask thing
ringer_track_labels_loop=copy(ringer_track_labels)
xx_seg,yy_seg=array([]),array([])
for l_bit in ringer_track_labels_loop:
sl=ring_slices[l_bit-1]
track_spots=ring_seg[sl]==l_bit
ringer0_here=ringer0[sl][track_spots].any()
if track_spots.sum()<7:
continue
layers2_stamp=ring_seg_layers2[sl]
layers2_at_track=layers2_stamp[track_spots]
layers2_at_track_labels=unique(layers2_at_track).tolist()
try:layers2_at_track_labels.remove(0)
except ValueError:pass
Nl2s_possible=len(layers2_at_track_labels)
if Nl2s_possible>1:
l2_sizes=[]
l2_in_orig=[]
for l2_l in layers2_at_track_labels:
l2_spots=layers2_stamp==l2_l
l2_in_orig.append(ringer0[sl][l2_spots].any())
l2_sizes.append(l2_spots.sum())
l2_sizes=array(l2_sizes)
l2_size_cut=l2_sizes>2
Nl2s=sum(l2_size_cut)
if Nl2s>=2:
if ringer0_here and not array(l2_in_orig).any(): continue
ringer_track_labels_add=max(ringer_track_labels)+1+arange(Nl2s_possible)
ringer_track_labels=ringer_track_labels+ringer_track_labels_add.tolist()
ring_seg[sl][track_spots]=0
ringer_track_labels.remove(l_bit)
for l2_l,ring_seg_l in zip(layers2_at_track_labels,ringer_track_labels_add):
l2_spots=layers2_stamp==l2_l
l2yy,l2xx=nonzero(l2_spots)
xx_seg=append(xx_seg,l2xx)
yy_seg=append(yy_seg,l2yy)
ring_seg[sl][l2_spots]=ring_seg_l
print (fl_label_str+": thing with 1layer and 2layer masks actually matters!\n")
pltextras+='-2layer_masks'
ringer_track_labels=asarray(ringer_track_labels)
ring_seg_in_orig=[]
ring_seg_maxvals=[]
ring_seg_areas=[]
for l_bit in ringer_track_labels:
track_spots=ring_seg==l_bit
ring_seg_in_orig.append(ringer0[track_spots].any())
ring_seg_maxvals.append(filtstamp_ringer[track_spots].max())
ring_seg_areas.append(track_spots.sum())
ax=f.add_subplot(2,6,7)
ax.set_title('ring_seg')
imshow(ring_seg,interpolation='nearest',origin='lower left')
if len(xx_seg):scatter(xx_seg,yy_seg,edgecolors='k',facecolors='None')
ring_seg_in_orig=asarray(ring_seg_in_orig)
ring_seg_maxvals=asarray(ring_seg_maxvals)
ring_seg_areas=asarray(ring_seg_areas)
        #keep any track piece whose peak exceeds twice max_within (the 95th-percentile filtered value over the ring and its interior) or which contains an originally masked pixel
ring_seg_keep=(ring_seg_maxvals>max_within*2) + ring_seg_in_orig
if ring_seg_keep.sum()>0:
ringer_track_labels=ringer_track_labels[ring_seg_keep]
else:
print (fl_label_str+': if none are the originals, then take the largest and the brightest\n')
pltextras+='-largest_and_brightest'
try:
max_label=ringer_track_labels[ring_seg_maxvals.argmax()]
area_label=ringer_track_labels[ring_seg_areas.argmax()]
ringer_track_labels=[max_label]
if area_label!=max_label and ring_seg_areas.max()>5: ringer_track_labels.append(area_label)
except ValueError:
close(f);del f
return spots_ringer, "Un-ringable holes"#if there is no max valued/max area thing, then they're all super small and
newring=ringer0.copy() #at the very least, use the original track pixels
Nringworms=0
for bit_i,l_bit in enumerate(ringer_track_labels):
track_spots=ring_seg==l_bit
track_spots_final,included,rr=track2ring(track_spots,Rgetring_ring)
#now extend track?!
if not isnan(rr):
track_spots_final,stretch_count=iter_track_stretch(track_spots_final, filtstamp_ringer,dt_times_pt01,BASE,l_ringer,star_stamp,ts_rr_cut=ring_rr_cut,name_extras=('ring_rr%.2f' % (rr,)).replace('.','pt'),rr_per_step=.2)
else:
track_spots_final=scipy.ndimage.binary_dilation(track_spots_final,conn8)
newring+=track_spots_final
ringer_yy,ringer_xx=nonzero(track_spots_final)
try:
ax=f.add_subplot(2,6,bit_i+8)
ax.set_title('ringer track extension\niter='+str(bit_i))
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_xx,ringer_yy,marker='o',edgecolors='k',facecolors='None',s=50)
if rr>ring_rr_cut or isnan(rr):
Nringworms+=1
pltextras+='-ringworms%s' % (Nringworms)
ax.set_title(ax.get_title()+" (rr=%.3f>rr_cut=%.3f)" % (rr,ring_rr_cut))
except ValueError: #if there are a lot of track pieces
if not 'TrackPiecesEQ' in pltextras:
pltextras+='-TrackPiecesEQ%s' % (len(ringer_track_labels))
except IndexError: #if there are a lot of track pieces
if not 'TrackPiecesEQ' in pltextras:
pltextras+='-TrackPiecesEQ%s' % (len(ringer_track_labels))
#if there are 2 worms, then mask entire thing!
if Nringworms>1:
newring+=ring_and_insides
ringer_Fyy,ringer_Fxx=nonzero(newring)
ax=f.add_subplot(2,6,11)
ax.set_title('ringer track extension\nFINAL')
imshow(filtstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_Fxx,ringer_Fyy,marker='o',edgecolors='k',facecolors='None',s=50)
ax=f.add_subplot(2,6,12)
ax.set_title('unfiltered image')
imshow(imstamp_ringer,interpolation='nearest',origin='lower left')
scatter(ringer_Fxx,ringer_Fyy,marker='o',edgecolors='k',facecolors='None',s=50)
pltextras+='-Changed'
f.suptitle(fl_label_str+pltextras)
f.savefig(plotdir+pltstr+pltextras)
close(f);del f
return newring, 0 #ringstat==0 implies all is well with ringer
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
#END: RING FUNCTIONS define commands for fixing rings
#START: HOLE FUNCTIONS define command for counting holes in objects
def count_hole_filled_pixels(spots):
'''count the number of holes in the spots, and if there is a ring that isn't quite filled, then fill it and count the holes'''
holefilledpixels=(mahotas.close_holes(spots)!=spots).sum()
if holefilledpixels>9:
return holefilledpixels
spots4=mahotas.dilate(spots,conn4)
holefilledpixels4=(mahotas.close_holes(spots4)!=spots4).sum()
if holefilledpixels4>holefilledpixels:
return holefilledpixels4
spots8=mahotas.dilate(spots,conn8)
holefilledpixels8=(mahotas.close_holes(spots8)!=spots8).sum()
if holefilledpixels8>holefilledpixels:
return holefilledpixels8
holefilledpixels_options=array([holefilledpixels,holefilledpixels4,holefilledpixels8])
return holefilledpixels_options.max()
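#EXAMPLE (illustrative sketch only, never executed; the 5x5 demo mask below is made up and is not pipeline data):
#a 3x3 square of True pixels with its center switched off has exactly one hole pixel, so the counter should report 1.
if 0:
    _demo_ring=numpy.zeros((5,5),dtype=bool)
    _demo_ring[1:4,1:4]=True
    _demo_ring[2,2]=False                       #a single interior hole
    _demo_nholes=count_hole_filled_pixels(_demo_ring)   #expected: 1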
#END: HOLE FUNCTIONS define command for counting holes in objects
#START: POLYNOMIAL FUNCTIONS define command for fitting lines and polynomials to objects
def polyfitter_specific(cosmics,polytype,degree=1):
'''This fits a polynomial (of a specific polytype, i.e. x_of_y or y_of_x) to the True elements in cosmics.
call it like: rr,poly,polytype=polyfitter_specific(cosmics,'x_of_y',1)'''
try:
yy,xx=nonzero(cosmics)
if polytype=='y_of_x': #XY
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,degree,full=True)
try:
rXY=residualsXY.min()
except ValueError:
rXY=nan
if isnan(rXY):
return nan,None,None
rr=rXY/len(xx)
y_of_x = poly1d(pXY)
return rr,y_of_x,'y_of_x'
if polytype=='x_of_y': #YX
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,degree,full=True)
try:
rYX=residualsYX.min()
except ValueError:
rYX=nan
if isnan(rYX):
return nan,None,None
rr=rYX/len(xx)
x_of_y = poly1d(pYX)
return rr,x_of_y,'x_of_y'
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def polyfitter(cosmics,degree=1):
'''This fits a polynomial to the True elements in cosmics.
call it like: rr,poly,polytype=polyfitter(cosmics,1)'''
try:
yy,xx=nonzero(cosmics)
#if cosmics is small enough, then see how oblong it is and if it's super oblong then fit with the dependent variable being the one we have more of
if len(xx)<100:
Yextent,Xextent=len(unique(yy)),len(unique(xx))
Y2Xratio=Yextent/float(Xextent)
if Y2Xratio>2.0:
return polyfitter_specific(cosmics,'x_of_y',degree=degree)
elif Y2Xratio<.5:
return polyfitter_specific(cosmics,'y_of_x',degree=degree)
#else continue with the fit
#if cosmics is big or not oblong it continues with the usual fit here
try:
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,degree,full=True)
rXY=residualsXY.min()
except ValueError:
rXY=nan
try:
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,degree,full=True)
rYX=residualsYX.min()
except ValueError:
rYX=nan
residual=nanmin([rXY,rYX])
if isnan(residual):
return nan,None,None
rr=residual/len(xx)
if rXY<=rYX:
y_of_x = poly1d(pXY)
return rr,y_of_x,'y_of_x'
else:
x_of_y = poly1d(pYX)
return rr,x_of_y,'x_of_y'
except:
ns.update(locals())
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def cosmicpoly(l,cosmics,stamp,ax,**kwargs):
'''cosmicpoly is like polyfitter(cosmics,degree=5)'''
try:
yy,xx=nonzero(cosmics)
pXY, residualsXY, rankXY, singular_valuesXY, rcondXY = polyfit(xx,yy,5,full=True)
pYX, residualsYX, rankYX, singular_valuesYX, rcondYX = polyfit(yy,xx,5,full=True)
try:
rXY=residualsXY.min()
except ValueError:
rXY=nan
try:
rYX=residualsYX.min()
except ValueError:
rYX=nan
residual=nanmin([rXY,rYX])
if isnan(residual):
return ax,nan
rr=residual/len(xx)
y_of_x = poly1d(pXY)
x_of_y = poly1d(pYX)
X=arange(xx.min(),xx.max(),.1)
Y=arange(yy.min(),yy.max(),.1)
ax.imshow(stamp,interpolation='nearest',origin='lower left')
if not 'marker' in kwargs:
kwargs['marker']='o'
ax.scatter(xx,yy,edgecolors='k',facecolors='None',label='points',**kwargs)
if rXY<rYX:
ax.plot(X,y_of_x(X),'y')
ax.plot(x_of_y(Y),Y,'r--')
else:
ax.plot(X,y_of_x(X),'y--')
ax.plot(x_of_y(Y),Y,'r')
yd,yu=yy.min()-4,yy.max()+4
xd,xu=xx.min()-4,xx.max()+4
ywidth=yu-yd
xwidth=xu-xd
if xwidth>ywidth:
ax.set_ylim(yd,yd+xwidth)
ax.set_xlim(xd,xu)
elif ywidth>xwidth:
ax.set_xlim(xd,xd+ywidth)
ax.set_ylim(yd,yu)
ax.set_title('label %s: residual/#points=%.3f' % (l,rr),size=12)
return ax,rr
except:
ns.update(locals())
show();raise
#END: POLYNOMIAL FUNCTIONS define command for fitting lines and polynomials to objects
#START: TRACK STRETCHING and CONNECTING
ts_count=0
def track_stretcher(cosmics,CRfiltstamp,thresh,star_stamp,stretchL_total,stretchR_total,ts_rr_cut,name_extras,rr_per_step):
    '''this fits a line to `cosmics` and stretches the mask along the fitted line, then checks whether any of the pixels reached by the stretching have counts in `CRfiltstamp` above `thresh`. Pixels that do are added to the mask. Returns (cosmics_final,Linclude,Rinclude,rr); if no stretch is possible it returns (cosmics,0,0,rr). It is meant to be called from within iter_track_stretch'''
try:
rr,poly,polytype=polyfitter(cosmics,1)
#get spots along the line
if rr>ts_rr_cut:
return cosmics,0,0,rr
#get cosmic endpoints
cosmic_ends=cosmics*logical_not(pymorph.thin(cosmics,pymorph.endpoints(option='homotopic'),1))
around_cosmics=scipy.ndimage.binary_dilation(cosmic_ends,structure=conn8, iterations=2) * logical_not(cosmics+star_stamp) #this way stars aren't included in pts at all!
ayy,axx=nonzero(around_cosmics)
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
else:
return cosmics,0,0,rr
close_cutL=1.2+stretchL_total*rr_per_step
close_cutR=1.2+stretchR_total*rr_per_step
extend_track_spotsL=aOffsets<close_cutL
extend_track_spotsR=aOffsets<close_cutR
if not extend_track_spotsL.any() or not extend_track_spotsR.any():
return cosmics,0,0,rr
#get the corner spots!
end_yy,end_xx=nonzero(cosmic_ends)
if polytype=='x_of_y':
end_X=poly(end_yy)
endpts_off=(end_xx-end_X).__abs__()
elif polytype=='y_of_x':
end_Y=poly(end_xx)
endpts_off=(end_yy-end_Y).__abs__()
endpts=zip(end_yy,end_xx)
UR=array([end[0]+end[1] for end in endpts])
UL=array([end[0]-end[1] for end in endpts])
LL=array([-end[0]-end[1] for end in endpts])
LR=array([-end[0]+end[1] for end in endpts])
close_enoughL=endpts_off<close_cutL+.5 #give it an extra 1/2 pixel so it has a chance of picking up neighbors
close_enoughR=endpts_off<close_cutR+.5 #give it an extra 1/2 pixel so it has a chance of picking up neighbors
Lce=close_enoughL.any()
Rce=close_enoughR.any()
if not Lce and not Rce:
return cosmics,0,0,rr
if Lce:
endpts_Lstandard=[endpt for i,endpt in enumerate(endpts) if close_enoughL[i]]
UR_Lstandard=UR[close_enoughL]
UL_Lstandard=UL[close_enoughL]
LL_Lstandard=LL[close_enoughL]
LR_Lstandard=LR[close_enoughL]
URpt_Lstandard=endpts_Lstandard[UR_Lstandard.argmax()]
ULpt_Lstandard=endpts_Lstandard[UL_Lstandard.argmax()]
LLpt_Lstandard=endpts_Lstandard[LL_Lstandard.argmax()]
LRpt_Lstandard=endpts_Lstandard[LR_Lstandard.argmax()]
if Rce:
endpts_Rstandard=[endpt for i,endpt in enumerate(endpts) if close_enoughR[i]]
UR_Rstandard=UR[close_enoughR]
UL_Rstandard=UL[close_enoughR]
LL_Rstandard=LL[close_enoughR]
LR_Rstandard=LR[close_enoughR]
URpt_Rstandard=endpts_Rstandard[UR_Rstandard.argmax()]
ULpt_Rstandard=endpts_Rstandard[UL_Rstandard.argmax()]
LLpt_Rstandard=endpts_Rstandard[LL_Rstandard.argmax()]
LRpt_Rstandard=endpts_Rstandard[LR_Rstandard.argmax()]
#make sure they are extending along the main axis of the track
try:
m,b=poly.coeffs #for line y=m*x+b or x=m*y+b
if math.fabs(m)<slope_flat_cut:
if polytype=='x_of_y':
title_extras=' ***|srt8 UP and DOWN|*** '
Ltype=1
if Rce:
UR_pt=URpt_Rstandard;UL_pt=ULpt_Rstandard
Ux_midpt=(UR_pt[1]+UL_pt[1])/2.0
Rxxyy_spots=extend_track_spotsR*(ayy>=max(UR_pt[0],UL_pt[0])-1)*((axx<=Ux_midpt+1)*(axx>=Ux_midpt-1)) #upper restricted
if Lce:
LR_pt=LRpt_Lstandard;LL_pt=LLpt_Lstandard
Lx_midpt=(LR_pt[1]+LL_pt[1])/2.0
Lxxyy_spots=extend_track_spotsL*(ayy<=min(LR_pt[0],LL_pt[0])+1)*((axx<=Lx_midpt+1)*(axx>=Lx_midpt-1)) #lower restricted
elif polytype=='y_of_x':
title_extras=' ***_srt8 RIGHT and LEFT_*** '
Ltype=2
if Rce:
UR_pt=URpt_Rstandard;LR_pt=LRpt_Rstandard
Ry_midpt=(UR_pt[0]+LR_pt[0])/2.0
Rxxyy_spots=extend_track_spotsR*(axx>=max(UR_pt[1],LR_pt[1])-1)*((ayy<=Ry_midpt+1)*(ayy>=Ry_midpt-1)) #right restricted
if Lce:
UL_pt=ULpt_Lstandard;LL_pt=LLpt_Lstandard
Ly_midpt=(UL_pt[0]+LL_pt[0])/2.0
Lxxyy_spots=extend_track_spotsL*(axx<=min(UL_pt[1],LL_pt[1])+1)*((ayy<=Ly_midpt+1)*(ayy>=Ly_midpt-1)) #left restricted
elif math.fabs(m)>slope_flat_cut**(-1):
if polytype=='x_of_y':
title_extras=' ***_srt8 RIGHT and LEFT_*** '
Ltype=3
if Rce:
UR_pt=URpt_Rstandard;LR_pt=LRpt_Rstandard
Ry_midpt=(UR_pt[0]+LR_pt[0])/2.0
Rxxyy_spots=extend_track_spotsR*(axx>=max(UR_pt[1],LR_pt[1])-1)*((ayy<=Ry_midpt+1)*(ayy>=Ry_midpt-1)) #right restricted
if Lce:
UL_pt=ULpt_Lstandard;LL_pt=LLpt_Lstandard
Ly_midpt=(UL_pt[0]+LL_pt[0])/2.0
Lxxyy_spots=extend_track_spotsL*(axx<=min(UL_pt[1],LL_pt[1])+1)*((ayy<=Ly_midpt+1)*(ayy>=Ly_midpt-1)) #left restricted
elif polytype=='y_of_x':
title_extras=' ***|srt8 UP and DOWN|*** '
Ltype=4
if Rce:
UR_pt=URpt_Rstandard;UL_pt=ULpt_Rstandard
Ux_midpt=(UR_pt[1]+UL_pt[1])/2.0
                        Rxxyy_spots=extend_track_spotsR*(ayy>=max(UR_pt[0],UL_pt[0])-1)*((axx<=Ux_midpt+1)*(axx>=Ux_midpt-1)) #upper restricted
if Lce:
LR_pt=LRpt_Lstandard;LL_pt=LLpt_Lstandard
Lx_midpt=(LR_pt[1]+LL_pt[1])/2.0
                        Lxxyy_spots=extend_track_spotsL*(ayy<=min(LR_pt[0],LL_pt[0])+1)*((axx<=Lx_midpt+1)*(axx>=Lx_midpt-1)) #lower restricted
elif m>0:
title_extras=' ***/UPPER RIGHT and LOWER LEFT/*** '
Ltype=5
if Rce:
ur=(axx>=URpt_Rstandard[1]-1)*(ayy>=URpt_Rstandard[0]-1)
Rxxyy_spots=extend_track_spotsR*ur
if Lce:
ll=(axx<=LLpt_Lstandard[1]+1)*(ayy<=LLpt_Lstandard[0]+1)
Lxxyy_spots=extend_track_spotsL*ll
elif m<0:
title_extras=' ***\\UPPER LEFT and LOWER RIGHT\\*** '
Ltype=6
if Rce:
lr=(axx>=LRpt_Rstandard[1]-1)*(ayy<=LRpt_Rstandard[0]+1)
Rxxyy_spots=extend_track_spotsR*lr
if Lce:
ul=(axx<=ULpt_Lstandard[1]+1)*(ayy>=ULpt_Lstandard[0]-1)
Lxxyy_spots=extend_track_spotsL*ul
except ValueError:
return cosmics,0,0,rr
except AttributeError:
return cosmics,0,0,rr
#pick the things from Rxxyy_spots and Lxxyy_spots which have the highest value
if Rce:
Rxx,Ryy=axx[Rxxyy_spots],ayy[Rxxyy_spots]
Rpts=zip(Ryy,Rxx)
Rpts_vals=array([CRfiltstamp[o] for o in Rpts])
Rabove_thresh=Rpts_vals>thresh
Rinclude=(Rabove_thresh).any()
else: Rinclude=False
if Lce:
Lxx,Lyy=axx[Lxxyy_spots],ayy[Lxxyy_spots]
Lpts=zip(Lyy,Lxx)
Lpts_vals=array([CRfiltstamp[o] for o in Lpts])
Labove_thresh=Lpts_vals>thresh
Linclude=(Labove_thresh).any()
else: Linclude=False
if not Rinclude and not Linclude:
return cosmics,0,0,rr
#now get edges
cosmics_final=cosmics.copy()
cosmics_expanded1=scipy.ndimage.binary_dilation(cosmic_ends,structure=conn8, iterations=1)
if Rinclude:
R_Tedge_or_Fouter=array([cosmics_expanded1[o] for o in Rpts])
outer_above_thresh=Rabove_thresh[logical_not(R_Tedge_or_Fouter)].any()
inner_above_thresh=Rabove_thresh[(R_Tedge_or_Fouter)].any()
Rpts2include=set([])
if outer_above_thresh: #then take the max outer thing and it's edges above the thresh
out_pt=Rpts[Rpts_vals.argmax()]
outer_surrounding=set([(out_pt[0]+mx,out_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
outer_above_thresh=set([pt for i,pt in enumerate(Rpts) if Rabove_thresh[i]])
Rpts2include=Rpts2include.union(set.intersection(outer_above_thresh,outer_surrounding))
outer_inner_connection=set([pt for i,pt in enumerate(Rpts) if R_Tedge_or_Fouter[i]])
Rpts2include=Rpts2include.union(set.intersection(outer_inner_connection,outer_surrounding))
if inner_above_thresh: #then take the max inner thing and it's edges above the thresh
in_pt=Rpts[Rpts_vals.argmax()]
inner_above_thresh=set([pt for i,pt in enumerate(Rpts) if Rabove_thresh[i]])
inner_surrounding=set([(in_pt[0]+mx,in_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Rpts2include=Rpts2include.union(set.intersection(inner_above_thresh,inner_surrounding))
for o in Rpts2include:
cosmics_final[o]=True
if Linclude:
L_Tedge_or_Fouter=array([cosmics_expanded1[o] for o in Lpts])
outer_above_thresh=Labove_thresh[logical_not(L_Tedge_or_Fouter)].any()
inner_above_thresh=Labove_thresh[(L_Tedge_or_Fouter)].any()
Lpts2include=set([])
if outer_above_thresh: #then take the max outer thing and it's edges above the thresh
out_pt=Lpts[Lpts_vals.argmax()]
outer_above_thresh=set([pt for i,pt in enumerate(Lpts) if Labove_thresh[i]])
outer_surrounding=set([(out_pt[0]+mx,out_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Lpts2include=Lpts2include.union(set.intersection(outer_above_thresh,outer_surrounding))
outer_inner_connection=set([pt for i,pt in enumerate(Lpts) if L_Tedge_or_Fouter[i]])
Lpts2include=Lpts2include.union(set.intersection(outer_inner_connection,outer_surrounding))
if inner_above_thresh: #then take the max inner thing and it's edges above the thresh
in_pt=Lpts[Lpts_vals.argmax()]
inner_above_thresh=set([pt for i,pt in enumerate(Lpts) if Labove_thresh[i]])
inner_surrounding=set([(in_pt[0]+mx,in_pt[1]+my) for mx,my in itertools.product([-1,0,1],[-1,0,1])])
Lpts2include=Lpts2include.union(set.intersection(inner_above_thresh,inner_surrounding))
for o in Lpts2include:
cosmics_final[o]=True
########f=figure(figsize=(11,10))
########ax2=f.add_subplot(1,2,2)
########ax1=f.add_subplot(10,2,19)
########yy1,xx1=nonzero(cosmics)
########yy2,xx2=nonzero(cosmics_final*logical_not(cosmics))
########ax2.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
########ax2.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
########ax2.scatter(xx2,yy2,s=35,alpha=.5,marker='x',edgecolors='w',facecolors='None')
########xx_ends_plot=[]
########yy_ends_plot=[]
########if Rce:
######## ULLRxx_Rends_plot=[pt[1] for pt in [ULpt_Rstandard,LRpt_Rstandard]]
######## ULLRyy_Rends_plot=[pt[0] for pt in [ULpt_Rstandard,LRpt_Rstandard]]
######## URLLxx_Rends_plot=[pt[1] for pt in [URpt_Rstandard,LLpt_Rstandard]]
######## URLLyy_Rends_plot=[pt[0] for pt in [URpt_Rstandard,LLpt_Rstandard]]
######## ax2.scatter(URLLxx_Rends_plot,URLLyy_Rends_plot,s=60,marker='>',edgecolors='yellow',facecolors='None',label='UR/LL')
######## ax2.scatter(ULLRxx_Rends_plot,ULLRyy_Rends_plot,s=60,marker='>',edgecolors='purple',facecolors='None',label='UL/LR')
######## xx_ends_plot+=ULLRxx_Rends_plot;xx_ends_plot+=URLLxx_Rends_plot
######## yy_ends_plot+=ULLRyy_Rends_plot;yy_ends_plot+=URLLyy_Rends_plot
########if Lce:
######## ULLRxx_Lends_plot=[pt[1] for pt in [ULpt_Lstandard,LRpt_Lstandard]]
######## ULLRyy_Lends_plot=[pt[0] for pt in [ULpt_Lstandard,LRpt_Lstandard]]
######## URLLxx_Lends_plot=[pt[1] for pt in [URpt_Lstandard,LLpt_Lstandard]]
######## URLLyy_Lends_plot=[pt[0] for pt in [URpt_Lstandard,LLpt_Lstandard]]
######## ax2.scatter(URLLxx_Lends_plot,URLLyy_Lends_plot,s=60,marker='<',edgecolors='yellow',facecolors='None',label='UR/LL')
######## ax2.scatter(ULLRxx_Lends_plot,ULLRyy_Lends_plot,s=60,marker='<',edgecolors='purple',facecolors='None',label='UL/LR')
######## xx_ends_plot+=ULLRxx_Lends_plot;xx_ends_plot+=URLLxx_Lends_plot
######## yy_ends_plot+=ULLRyy_Lends_plot;yy_ends_plot+=URLLyy_Lends_plot
########f.suptitle('white "x"=added by stretching , black "o"=there before \n yellow ">"=UR/LL_Rstandard , purple ">"=UL/LR_Rstandard || yellow "<"=UR/LL_Lstandard , purple "<"=UL/LR_Lstandard\n'+title_extras)
########ax1.set_frame_on(False)
########f=imagetools.AxesStripText(f,axes=[ax1],allticks=True,titles=False)
########ax1.set_title('stretchL_total=%s\nstretchR_total=%s\nrr=%.3f\npolytype=%s\nLtype=%s\nm=%.3f\nLce=%s Linclude=%s\nRce=%s Rinclude=%s' % (stretchL_total,stretchR_total,rr,polytype,Ltype,m,Lce,Linclude,Rce,Rinclude))
########ax2.set_xlim(min(xx_ends_plot)-5,max(xx_ends_plot)+5)
########ax2.set_ylim(min(yy_ends_plot)-5,max(yy_ends_plot)+5)
########NameString='pltRevise%s_stretch-TS%.4i-iter%s' % (OFB,ts_count,name_extras)
########f=imagetools.NameFileDate(f,NameString,FileString,DateString)
########f.savefig(plotdir+NameString)
########close(f);del f
global ts_count
ts_count+=1
return cosmics_final,Linclude,Rinclude,rr
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def iter_track_stretch(cosmics, CRfiltstamp,bthresh,BASE,l,star_stamp,name_extras='',ts_rr_cut=1.8,rr_per_step=.07,track_len_cut=4):
'''run track_stretcher over and over until it converges'''
yy_lin,xx_lin=nonzero(cosmics)
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
if track_length<track_len_cut:
return cosmics,0
stretch_countL=0
stretch_countR=0
stretch=1
########cosmics_no_stretch=cosmics.copy() #noplot
while stretch:
cosmics,stretchL,stretchR,rr=track_stretcher(cosmics,CRfiltstamp,bthresh,star_stamp,stretch_countL,stretch_countR,ts_rr_cut,name_extras,rr_per_step)
stretch_countL+=stretchL
stretch_countR+=stretchR
stretch=stretchL or stretchR
stretch_count=stretch_countL+stretch_countR
########global ts_count
########ts_count+=1
########if stretch_count:
######## f=figure(figsize=(11,10))
######## ax=f.add_subplot(1,1,1)
######## yy1,xx1=nonzero(cosmics_no_stretch)
######## yy2,xx2=nonzero(cosmics*logical_not(cosmics_no_stretch))
######## ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
######## ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
######## ax.scatter(xx2,yy2,marker='x',edgecolors='w',facecolors='None')
######## bthresh_tag=('bthresh%.3i' % (bthresh))
######## NameString='pltRevise%s_stretch-TS%.4i-%s-label%.4i%s' % (OFB,ts_count,bthresh_tag,l,name_extras)
######## ax.set_title('white "x" = added by stretching\n# stretch iterations Left=%s Right=%s\nstretch threshold=%s (label=%s) rr=%.3f' % (stretch_countL,stretch_countR,bthresh_tag,l,rr))
######## f=imagetools.NameFileDate(f,NameString,FileString,DateString)
######## if cosmics.size>100:
######## ax.set_xlim(min(xx1.min(),xx2.min())-3,max(xx1.max(),xx2.max())+3)
######## ax.set_ylim(min(yy1.min(),yy2.min())-3,max(yy1.max(),yy2.max())+3)
######## f.savefig(plotdir+NameString)
######## close(f);del f
########else:
######## f=figure(figsize=(11,10))
######## ax=f.add_subplot(1,1,1)
######## yy1,xx1=nonzero(cosmics_no_stretch)
######## ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
######## ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',s=40)
######## bthresh_tag=('bthresh%.3i' % (bthresh))
######## NameString='pltRevise%s_stretch-TS%.4i-unstretchable-%s-label%.4i%s' % (OFB,ts_count,bthresh_tag,l,name_extras)
######## ax.set_title('UNSTRETCHABLE (label=%s) rr=%.3f' % (l,rr))
######## f=imagetools.NameFileDate(f,NameString,FileString,DateString)
######## f.savefig(plotdir+NameString)
######## close(f);del f
return cosmics,stretch_count
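#EXAMPLE (illustrative sketch only, never executed; _stamp, _seed and _no_stars are synthetic stand-ins and the
#threshold 100.0 is made up -- the real calls use the bthresh/dt-derived thresholds and star masks; BASE and the
#module globals conn8/slope_flat_cut are assumed to be defined as in the rest of this script):
#a seed mask covering only the middle of a bright diagonal streak should grow outward along the fitted line,
#one accepted left/right step at a time, until no pixel along the line exceeds the threshold.
if 0:
    _stamp=numpy.zeros((30,30))+10.0
    for _i in range(8,22):
        _stamp[_i,_i]=500.0                     #bright diagonal streak
    _seed=numpy.zeros((30,30),dtype=bool)
    for _i in range(12,18):
        _seed[_i,_i]=True                       #initial mask covers only part of the streak
    _no_stars=numpy.zeros((30,30),dtype=bool)
    _grown,_nsteps=iter_track_stretch(_seed,_stamp,100.0,BASE,0,_no_stars,name_extras='_demo')
    #expected (roughly): _grown.sum()>_seed.sum() and _nsteps>0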
def connector(cosmics):
'take non-connected cosmics (that are almost connected) and connect them'
contig_checkseg,Npieces=scipy.ndimage.label(cosmics,conn8)
del contig_checkseg
if Npieces<=1:
return cosmics
rr,poly,polytype=polyfitter(cosmics,1)
if rr>3.0:
return cosmics
hull_final=skimage.morphology.convex_hull_image(cosmics) * logical_not(cosmics)
hyy,hxx=nonzero(hull_final)
if polytype=='x_of_y':
hX=poly(hyy)
hOffsets=(hxx-hX).__abs__()
elif polytype=='y_of_x':
hY=poly(hxx)
hOffsets=(hyy-hY).__abs__()
else:
return cosmics
if rr<.6:hull_rr=.6
elif rr>1.2:hull_rr=1.2
else: hull_rr=rr
hull_extend_cosmics=hOffsets<hull_rr
Hxx,Hyy=hxx[hull_extend_cosmics],hyy[hull_extend_cosmics]
Hpts=zip(Hyy,Hxx)
for o in Hpts:
cosmics[o]=True
return cosmics
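#EXAMPLE (illustrative sketch only, never executed; the two-piece demo mask below is made up):
#two collinear segments in the same row, separated by a 3-pixel gap, get bridged because the convex-hull
#pixels lying within ~1 pixel of the fitted line are switched on.
if 0:
    _broken=numpy.zeros((10,20),dtype=bool)
    _broken[5,2:8]=True
    _broken[5,11:17]=True                       #same row, gap at columns 8-10
    _joined=connector(_broken.copy())
    #expected: _joined[5,8:11].all() is True, i.e. the two pieces become one 8-connected piece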
#END: TRACK STRETCHING and CONNECTING
#START: BLENDING FUNCTIONS
def ExpandMaskAbove(image,mask,EdgeThresh):
'''take the input mask and add in edges that are above some threshold'''
expand_mask=scipy.ndimage.binary_dilation(mask,structure=conn8, iterations=1) #we use conn4 in step5_make_inputs_and_outputs.2.1.py
edge_mask=expand_mask*logical_not(mask)
edgepixels=ma.array(image,mask=logical_not(edge_mask)) #mask all non-edges
edgeout=edgepixels>EdgeThresh #edges > bthresh = True & edges < bthresh = False
add2mask=ma.filled(edgeout,False) #non-edges=False and edges<bthresh=False
maskEGbthresh=mask+add2mask #maskEGbthresh=size thresholded mask OR mask edge above the EdgeThresh
return maskEGbthresh
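#EXAMPLE (illustrative sketch only, never executed; the tiny image and the threshold 50.0 are made up):
#one call grows the mask by a single 8-connected ring, but only onto edge pixels whose image value exceeds
#EdgeThresh; blocked_blender below simply repeats this step until the mask stops changing (or hits a star).
if 0:
    _im=numpy.array([[0.,0.,0.,0.,0.],
                     [0.,80.,200.,30.,0.],
                     [0.,0.,0.,0.,0.]])
    _mask=numpy.zeros((3,5),dtype=bool)
    _mask[1,2]=True                             #seed on the 200-count pixel
    _grown=ExpandMaskAbove(_im,_mask,50.0)
    #expected: _grown[1,1] is True (80>50) while _grown[1,3] stays False (30<50)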
#set cutting parameters
blend_raise_bthresh_amount=70.0
blend_rr_cut=3.0
blend_sizediff_1vs2_cut=60
blend_slope_off_cut=.06
blend_holefilledpixels_cut=21
def blocked_blender(bthresh,CRfiltimage,CRll,CRslices,starbools,CRseg):
'''take input CR detections and output detections that have been blended (surroundings have been included in the mask if they were above bthresh)
and blocked (meaning they are blocked from hitting a detected object)
'''
try:
print '\n############# START BLEND: bthresh = '+str(bthresh)+" ###################"
blend_Niters=[];blend_ended=[]
blended_CRseg=CRseg.copy()
bthresh2=bthresh+blend_raise_bthresh_amount
bthresh_raise_tag=('bthresh%.i_to_%.i' % (bthresh,bthresh+blend_raise_bthresh_amount))
'''
THESIS-BlendFunc:blending function begins by looping over each track in the initial masks
'''
for l in CRll:
sl=CRslices[l-1]
sle=imagetools.slice_expand(sl,100)
CRfiltstamp=CRfiltimage[sle]
SBstamp=starbools[sle]
cosmics1=blended_CRseg[sle]==l
cosmics2=cosmics1.copy()
#iterate a max of 100 times to expand to neighboring pixels above bthresh
'''
THESIS-BlendFunc: for each mask region, we include the pixels neighboring a track if those pixels are above the blending threshold.
THESIS-BlendFunc: we iteratively continue the inclusion of neighboring pixels above the threshold, allowing the track to expand, until it either:
            THESIS-BlendFunc: (1) the expansion converges, i.e. an iteration adds no new neighboring pixels above the threshold
THESIS-BlendFunc: (2) expands into a star or a saturation spike
THESIS-BlendFunc: (3) reaches the 100 iteration limit
'''
for i in range(100): #limit to 100 iterations
cosmicsb4=cosmics1.copy()
cosmics1=ExpandMaskAbove(CRfiltstamp,cosmicsb4,bthresh)
if (cosmics1==cosmicsb4).all():
blend_ended.append(0)
break
if SBstamp[cosmics1].any():
blend_ended.append(1)
break
else:
blend_ended.append(2)
blend_Niters.append(i+1)
'''
            THESIS-BlendFunc: This is done at two different thresholds, one at the fiducial threshold and one 70 counts higher; the resulting masks are referred to as the fiducial mask and the raised mask, respectively.
'''
for i in range(100): #limit to 100 iterations
cosmics2b4=cosmics2.copy()
cosmics2=ExpandMaskAbove(CRfiltstamp,cosmics2b4,bthresh2)
if (cosmics2==cosmics2b4).all():
break
if SBstamp[cosmics2].any():
break
# if the higher threshold result is way smaller, then consider returning the 2nd one
size_diff12=cosmics1.sum()-cosmics2.sum()
#key: cosmics1 is "fiducial mask" and cosmics2 is "raised mask"
'''
            THESIS-BlendFunc: The raised mask will inevitably have a smaller area; if it is smaller by 60 pixels or more, this indicates that the algorithm overmasked at the fiducial blending threshold, and we consider choosing the raised mask instead.
THESIS-BlendFunc: This choice is based on several criteria:
THESIS-BlendFunc: (1) if less than 3% of the pixels in fiducial mask are open8 pixels, then fiducial mask isn't a blob, so stick with fiducial mask (rather than switching to raised mask)
THESIS-BlendFunc: (2) if more than 3% are open8, then we fit a line to the (x,y) coordinates of the pixels in the track.
THESIS-BlendFunc: ...Since CR tracks often look like lines, if the R^2 value per pixel is bad (>3), it is an indication that it might not be a track.
THESIS-BlendFunc: (2a) if the R^2 value per pixel is bad (>3), and if greater than 20% of the region is open8, then use raised mask
THESIS-BlendFunc: (there is also an iterative track stretcher run if the fit is really good (R^2<1.2))
THESIS-BlendFunc: (2b) if the R^2 value per pixel is bad (>3), and less than 20% of the region is open8, then use fiducial mask (not much difference, and if fiducial mask is really that bad, it'll get dropped later)
THESIS-BlendFunc: (2c) if the R^2 value per pixel is good (<3) for raised mask, then try it for fiducial mask as well.
            THESIS-BlendFunc: (2c-i) if the R^2 value per pixel is bad (>4) for fiducial mask OR their slopes are different (by at least 6%), then use raised mask
THESIS-BlendFunc: (there is also an iterative track stretcher run if the fit is really good (R^2<1.2))
            THESIS-BlendFunc: (2c-ii) if the R^2 value per pixel is good (<4) for fiducial mask as well and their slopes are similar (within 6%), then use fiducial mask, since they are both lines along the same trajectory anyway.
THESIS-BlendFunc: (3) if there are at least 21 pixels contained within holes in the mask, then it's a ring and you should choose the raised mask, even though it's probably not any better (can't be worse!)
'''
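            #EXAMPLE of the criteria above (numbers made up for illustration): with a 300-pixel fiducial mask and a
            #200-pixel raised mask, size_diff12=100>60 so the checks run; with open8_frac=0.25 (>.03 and >.2) and
            #rr2=3.5 (>blend_rr_cut=3.0) this falls under case (2a) and the raised mask would be adopted.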
if size_diff12>blend_sizediff_1vs2_cut:
open8_cosmics=scipy.ndimage.binary_opening(cosmics1,conn8)
open8_Nspots= float(open8_cosmics.sum())
open8_frac=open8_Nspots/cosmics1.sum()
if open8_frac<.03: #if less than 3% of the pixels in cosmics1 are open8 pixels, then cosmics1 isn't a blob, so stick with cosmics1 (rather than switching to cosmics2)
if PLOT_ON_OFF:
yyp1,xxp1=nonzero(cosmics1);yyp2,xxp2=nonzero(cosmics2)
xxpmin=min(xxp1.min(),xxp2.min());xxpmax=max(xxp1.max(),xxp2.max())
yypmin=min(yyp1.min(),yyp2.min());yypmax=max(yyp1.max(),yyp2.max())
f=figure(figsize=(12,9));f.add_subplot(121);title('FINAL: cosmics1');imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xxp1,yyp1,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.add_subplot(122);title('cosmics2');imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xxp2,yyp2,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.suptitle('Stick with cosmics1\ntrue that: open8_frac=%.3f < .03\nhence its cosmics1 isnt a blob, so stick with cosmics1' % (open8_frac,))
f.text(.003,.004,bthresh_raise_tag+': open8_frac < .03, hence its cosmics1 isnt a blob, so stick with cosmics1' ,size=10)
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-No_Open8-label%.4i' % (OFB,bthresh_raise_tag,l))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
else:
rr2,poly2,polytype2=polyfitter(cosmics2,1)
if isnan(rr2) or rr2>blend_rr_cut: #if the linefit looks like crap for cosmics2, then it probably will for cosmics1, so we assume it's not a line and take 2nd mask
if open8_frac>.2: #if greater than 20% of the image is open8, then use cosmics2:
cosmics1_old=cosmics1.copy()
cosmics1=cosmics2
'''
THESIS-BlendFunc: this is nearly always an improvement, where the masking bled into other objects and has to be returned to a sane size (i.e. cosmics2)
'''
#2019: no need to make these plots, there are a ton of them and they're always an improvement
if 0:
if PLOT_ON_OFF:
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1_old);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Going from cosmics1 to cosmics2!\npassed: open8_frac=%.3f > .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\npassed: open8_frac=%.3f >.2' % (open8_frac,rr2,blend_rr_cut,open8_frac))
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-simple-label%.4i' % (OFB,bthresh_raise_tag,l))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
else: #if linefit looks like crap for cosmics2 AND less than 20% of the image is open8, then use cosmics1:
if PLOT_ON_OFF:
yyp1,xxp1=nonzero(cosmics1);yyp2,xxp2=nonzero(cosmics2)
xxpmin=min(xxp1.min(),xxp2.min());xxpmax=max(xxp1.max(),xxp2.max())
yypmin=min(yyp1.min(),yyp2.min());yypmax=max(yyp1.max(),yyp2.max())
f=figure(figsize=(12,9));f.add_subplot(121);title('FINAL: cosmics1');imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xxp1,yyp1,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.add_subplot(122);title('cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xxp2,yyp2,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.suptitle('Stick with cosmics1!\npassed: open8_frac=%.3f > .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\nfailed open8_frac=%.3f >.2' % (open8_frac,rr2,blend_rr_cut,open8_frac))
f.text(.003,.004,bthresh_raise_tag+': open8_frac < .20, so even though linefit is decent, just stick with cosmics1' ,size=10)
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-simple-label%.4i' % (OFB,bthresh_raise_tag,l))
print 'if linefit looks like crap for cosmics2 AND less than 20% of the image is open8, then use cosmics1'
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
else: #if the line fit is decent for cosmics2, try the line fit for cosmics1
yy2,xx2=nonzero(cosmics2)
slope2=poly2.coeffs[0]
#try:
# slope2=poly2.coeffs[0]
#except ValueError:
# pass
#except AttributeError:
# pass
yy1,xx1=nonzero(cosmics1*logical_not(cosmics2))
yy3,xx3=nonzero(cosmics1)
if polytype2=='y_of_x': #if rXY2<rYX2:
pXY1, residualsXY1, rankXY1, singular_valuesXY1, rcondXY1 = polyfit(xx1,yy1,1,full=True)
slope1=pXY1[0]
slope_off=abs(slope2-slope1)
rr1=residualsXY1[0]/len(xx1)
poly1 = poly1d(pXY1)
X3=arange(xx3.min(),xx3.max(),.1)
pltxx1,pltyy1=X3,poly1(X3)
pltxx2,pltyy2=X3,poly2(X3)
else: #if polytype2=='x_of_y': #if rYX2<rXY2:
pYX1, residualsYX1, rankYX1, singular_valuesYX1, rcondYX1 = polyfit(yy1,xx1,1,full=True)
slope1=pYX1[0]
slope_off=abs(slope2-slope1)
rr1=residualsYX1[0]/len(xx1)
poly1 = poly1d(pYX1)
Y3=arange(yy3.min(),yy3.max(),.1)
pltxx1,pltyy1=poly1(Y3),Y3
pltxx2,pltyy2=poly2(Y3),Y3
if isnan(rr1) or rr1>(blend_rr_cut+1.0) or slope_off>blend_slope_off_cut: #if the R^2 value per pixel is bad (>4) for cosmics1 OR their slopes are different (by at least 6%), then use cosmics2
#stretch cosmics2 if it's linear
if rr2<1.2:
cosmics2_addons,count_stretch2=iter_track_stretch(cosmics2, CRfiltstamp,bthresh*.4,BASE,l,SBstamp,name_extras='_InBlender2',ts_rr_cut=2.0,rr_per_step=.04)
cosmics2[cosmics2_addons*cosmics1]=True
cosmics2=connector(cosmics2)
else:
count_stretch2=0
#make sure picking cosmics2 doesn't mean that we're breaking the track into smaller pieces
contig_checkseg,Npieces2=scipy.ndimage.label(cosmics2,conn8)
contig_checkseg,Npieces1=scipy.ndimage.label(cosmics1,conn8)
if Npieces2<=Npieces1: #if picking cosmics2 doesn't break the track up into smaller pieces, then continue
if PLOT_ON_OFF:
'''
THESIS-BlendFunc: this is nearly always an improvement, where the masking bled into other objects and has to be returned to a sane size (i.e. cosmics2)
'''
#2019: no need to make these plots, there are a ton of them and they're always an improvement
if 0:
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
f.suptitle('Going from cosmics1 to cosmics2! (count_stretch2=%s)\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\npassed: rr1=%.3f>blend_rr_cut+1=%.3f or slope_off=%.3f>blend_slope_off_cut=%.3f' % (count_stretch2,open8_frac,rr2,blend_rr_cut,rr1,blend_rr_cut+1.0,slope_off,blend_slope_off_cut))
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-higher_thresh_much_smaller-label%.4i' % (OFB,bthresh_raise_tag,l))
if PLOT_ON_OFF_SHOW:show()
else: close(f);del f
cosmics1=cosmics2
elif PLOT_ON_OFF: #else cosmics1 stays the same because I determine that they are both lines along the same trajectory!
f=figure(figsize=(10,5))
f.suptitle('Stick with cosmics1!\npassed: open8_frac=%.3f < .03\npassed: rr2=%.3f>blend_rr_cut=%.3f\nfailed: rr1=%.3f>blend_rr_cut=%.3f and slope_off=%.3f>blend_slope_off_cut=%.3f' % (open8_frac,rr2,blend_rr_cut,rr1,blend_rr_cut,slope_off,blend_slope_off_cut))
ax=f.add_subplot(1,1,1)
ax.imshow(CRfiltstamp,interpolation='nearest',origin='lower left')
ax.scatter(xx1,yy1,marker='o',edgecolors='k',facecolors='None',label='cosmics1')
ax.scatter(xx2,yy2,marker='x',edgecolors='w',facecolors='None',label='cosmics2')
ax.plot(pltxx2,pltyy2,'w')
ax.plot(pltxx1,pltyy1,'k--')
ax.set_ylim(yy3.min()-3,yy3.max()+3)
ax.set_xlim(xx3.min()-3,xx3.max()+3)
f.text(.003,.004,bthresh_raise_tag+': the linefit is decent for both, theyre on the same trajectory, so just stick with cosmics1' ,size=10)
f.savefig(plotdir+'pltRevise%s_failed_raise_thresh_%s-SameTrajectory-label%.4i' % (OFB,bthresh_raise_tag,l))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
            #get the number of hole-filled pixels the simple way rather than using `holefilledpixels=count_hole_filled_pixels(cosmics1)`
if bthresh==bthresh1: #only do the holefilled cut raise if it's the first time using blender
holefilledpixels=(scipy.ndimage.binary_fill_holes(cosmics1)!=cosmics1).sum()
if holefilledpixels>blend_holefilledpixels_cut:
#2019: no need to make these plots, there are a ton of them and they're always an improvement
if 0 and PLOT_ON_OFF:
yyp1,xxp1=nonzero(cosmics1);yyp2,xxp2=nonzero(cosmics2)
xxpmin=min(xxp1.min(),xxp2.min());xxpmax=max(xxp1.max(),xxp2.max())
yypmin=min(yyp1.min(),yyp2.min());yypmax=max(yyp1.max(),yyp2.max())
f=figure(figsize=(12,9));f.add_subplot(121);title('cosmics1');yy,xx=nonzero(cosmics1);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.add_subplot(122);title('FINAL: cosmics2');yy,xx=nonzero(cosmics2);imshow(CRfiltstamp,interpolation='nearest',origin='lower left');scatter(xx,yy,marker='o',edgecolors='k',facecolors='None')
xlim(xxpmin,xxpmax)
ylim(yypmin,yypmax)
f.suptitle('Go from cosmics1 to cosmics2!')
f.text(.003,.004,bthresh_raise_tag+': choose cosmics2 over cosmics1, simply because of the holes...helps little' ,size=10)
f.savefig(plotdir+'pltRevise%s_passed_raise_thresh_%s-holefilledpixels-label%.4i' % (OFB,bthresh_raise_tag,l))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
print "holefilledpixels: ",holefilledpixels
cosmics1=cosmics2
blended_CRseg[sle][cosmics1]=l
if PLOT_ON_OFF_SHOW:show()
#loop ends if mask (1) converges, (2) hits a star, or (3) hits 100 iterations
blend_ended=array(blend_ended)
print "times converged: ",(blend_ended==0).sum()
print "times hit star : ",(blend_ended==1).sum()
print "times 100 iters: ",(blend_ended==2).sum()
print "at bthresh %.3i it converges after a mean of %.3f iterations" % (bthresh,numpy.mean(blend_Niters))
print "# iterations=",blend_Niters
print '############# END BLEND: bthresh = '+str(bthresh)+" ###################\n"
return blended_CRseg
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
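#EXAMPLE (hedged usage sketch, never executed; CRll, CRslices, starbools, CRseg and BBCRseg are the label list,
#slice list, star mask and segmentation images built in the main block further down -- here they are placeholders):
if 0:
    BBCRseg=blocked_blender(bthresh1,CRfiltimage,CRll,CRslices,starbools,CRseg)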
#END: BLENDING FUNCTIONS
#START: LABEL FUNCTIONS
def plotlabels(ll,segments=None,slices=None,params=None,background=None):
'''plot stamps of all of the masks in the label list `ll`.'''
try:
if segments is None:segments=BBCRseg
if slices is None: slices=BBCRslices
if params is None: params=ll
if background is None: background=image
patches=[]
for l in ll:
patches.append(imagetools.slice_expand(tuple(slices[l-1]),3))
fig=figure(figsize=(22,13.625))
Nlabels=len(ll)
if Nlabels<=4:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(2,2))
textsize=14
elif Nlabels<=9:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(3,3))
textsize=13
elif Nlabels<=16:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(4,4))
textsize=12
elif Nlabels<=25:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(5,5))
textsize=11
elif Nlabels<=6*7:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(6,7))
textsize=10
elif Nlabels<=6*8:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(6,8))
textsize=10
elif Nlabels<=7*8:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(7,8))
textsize=9
elif Nlabels<=7*9:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(7,9))
textsize=9
else:
fig,axes = imagetools.AxesList(fig=fig,compact=.02,shape=(8,10))
fig.subplots_adjust(top=.95)
textsize=8
if len(params)==Nlabels:
for ax,sl,title,l in zip(axes,patches,params,ll):
##spots=segments[sl]>0
spots=segments[sl]==l
yy,xx=nonzero(spots)
stamp=background[sl]
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xx,yy,marker='o',edgecolors='k',facecolors='None',label='points')
ax.set_title(str(title),size=10)
elif len(params)==len(slices):
for ax,sl,l in zip(axes,patches,ll):
title=params[l-1]
##spots=segments[sl]>0
spots=segments[sl]==l
yy,xx=nonzero(spots)
stamp=background[sl]
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xx,yy,marker='o',edgecolors='k',facecolors='None',label='points')
ax.set_title(str(title),size=10)
else:
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise Exception('gotta have len(params)==len(slices) or len(params)==len(ll)')
return fig
except:
ns.update(locals())
show();print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise
def reset_labels(prob_labels,segs2reset):
'''take in a current image and reset the masks in `prob_labels` to how they were at the beginning '''
CRsegX=segs2reset.copy()
for l in prob_labels:
spots=segs2reset==l
CRsegX[spots]=0 #reset the problem labels to zero
newspots=spots*detections0
CRsegX[newspots]=l #reset the problem labels to their original value
return CRsegX
#END: LABEL FUNCTIONS
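#ILLUSTRATION (hedged, not called by the pipeline): the blob cuts below rely on binary openings with an
#8-connected structuring element ("open8"/conn8) and a square element (connS). This self-contained sketch,
#with made-up arrays, shows how the surviving fraction after an open8 opening separates a thin CR track
#(fraction ~0) from a filled blob (fraction ~1).
def _demo_open8_fraction():
 '''Illustrative only: returns (track_frac, blob_frac); expected roughly (0.0, 1.0).'''
 import numpy as _np
 import scipy.ndimage as _ndi
 _conn8=_np.ones((3,3),dtype=bool)
 track=_np.zeros((9,9),dtype=bool)
 track[_np.arange(9),_np.arange(9)]=True #a 1-pixel-wide diagonal, CR-like track
 blob=_np.zeros((9,9),dtype=bool)
 blob[2:7,2:7]=True #a filled, star-like blob
 track_frac=float(_ndi.binary_opening(track,_conn8).sum())/track.sum()
 blob_frac=float(_ndi.binary_opening(blob,_conn8).sum())/blob.sum()
 return track_frac,blob_frac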
PLOT_ON_OFF=0 #0=plotting off 1=plotting on
PLOT_ON_OFF_SHOW=0 #0=plotting off 1=plotting on (this is only useful for debugging or thesis-writing)
if __name__ == "__main__":
args=imagetools.ArgCleaner(sys.argv)
if len(args)<1:
print 'args=',args
sys.exit()
fl=args[-1]
if not os.path.isfile(fl):
print "sys.argv=",sys.argv
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise Exception(fl+" is not a file!")
else:
print "starting file=",fl
#try:
# PLOT_ON_OFF=sys.argv[2]
#except:
# pass
#START: iter0
t0=time.time()
#get the image for `fl`
image=imagetools.GetImage(fl)
back_im=scipy.stats.scoreatpercentile(image,48)
CRfl=astropy.io.fits.open(fl)
header=CRfl[0].header
OBJECT=header['MYOBJ']
FILTER=header['FILTER']
CCDnum=header['IMAGEID']
#if CCDnum==7: PLOT_ON_OFF=1
#iter0: take the original files2check and prepare them for blending
files2check=[]
flname=os.path.basename(fl).split('.')[0]
if 'OCF' in fl:
BASE=os.path.basename(fl).split('OCF')[0]
else:
BASE=flname
#get cosmics images
OFB='%s_%s_%s' % (OBJECT,FILTER,BASE,)
CR_segfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_cosmics/SEGMENTATION_CRN-cosmics_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
CR_filtfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_cosmics/FILTERED_CRN-cosmics_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
CRfitsfl=astropy.io.fits.open(CR_filtfl)
rms=CRfitsfl[0].header['MYRMS']
rms_bins=arange(10,100,5)
#adam-tmp# rms_bins=arange(10,90,5)
bthresh1_bin=digitize([rms],rms_bins)[0] #no "-1" here because I want the top-edge of the bin, not the bottom edge
#adam-tmp# if bthresh1_bin==0 or bthresh1_bin>15:
if bthresh1_bin==0 or bthresh1_bin>17:
print "adam-Error: in running BB on fl=",fl,"\n\nrun this command to check it out: ipython -i -- ~/thiswork/eyes/CRNitschke/blocked_blender.2.2.py ",fl,"\n\n"; raise Exception('this rms just is not right')
bthresh1=rms_bins[bthresh1_bin]
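 #ILLUSTRATION (hedged, assumed rms value): e.g. rms=47.3 falls in the [45,50) bin, digitize returns 8,
 #and bthresh1=rms_bins[8]=50, i.e. the top edge of that bin.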
dt=CRfitsfl[0].header['CRN_DT']*rms#; ft=CRfitsfl[0].header['CRN_FT']*rms
dt_times_pt01=int(dt*.01+1) #this is like a ceiling function
CRfiltimage=CRfitsfl[0].data
CRfiltheader=CRfitsfl[0].header
CRfitsfl.close()
star_segfl='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_stars/SEGMENTATION_CRN-stars_%s_%s.%s.fits' % (OBJECT,FILTER,BASE,)
starseg0=asarray(imagetools.GetImage(star_segfl),dtype=int)
star0_slices=scipy.ndimage.find_objects(starseg0 )
Nstars=starseg0.max()
#remove stars that don't at least have a square in them
for i in range(1,Nstars+1):
sl=star0_slices[i-1]
spots=starseg0[sl]==i
openspots=scipy.ndimage.binary_opening(spots,array([[1,1],[1,1]]))
if not openspots.any():
starseg0[sl][spots]=0
 #now add in regions with at least 60 pixels above the saturation level (in case they aren't there yet)
sat=(image-back_im)>21000
sat_big=skimage.morphology.remove_small_objects(sat,60,connectivity=2) #conn8
sat_seg,Nsat_labels=mahotas.label(sat_big,conn8)
sat_labels=arange(Nsat_labels)+1
sat_slices=scipy.ndimage.find_objects(sat_seg)
s2=skimage.morphology.star(2)
sat_spike_bools=zeros(image.shape,dtype=bool)
#add very large regions near saturation that have an s2 shape in them
for l,sl in zip(sat_labels,sat_slices):
spots=sat_seg[sl]==l
if scipy.ndimage.binary_opening(spots,s2).any():
ll_ss0=unique(starseg0[sl][spots])
ss0_bools=zeros(image.shape,dtype=bool)
ss0_bools[sl][spots]=True
#print "ss0_bools.sum() before: ",ss0_bools.sum()
for l0 in ll_ss0:
if l0==0:continue
ss0_bools+=(starseg0==l0)
#print "ss0_bools.sum() after: ",ss0_bools.sum()
starseg0[ss0_bools]=l+Nstars
sl_wY,sl_wX=imagetools.slice_size(sl)
ratio_h2w=float(sl_wY)/sl_wX
if ratio_h2w>2:
sat_spike_bools[ss0_bools]=True
#setup final star position array
sat_spike_bools=mahotas.dilate(sat_spike_bools,conn4) #dilate only those large saturation areas
starbools=mahotas.dilate(starseg0>Nstars,conn4) #dilate only those large saturation areas
starbools+=(starseg0>0)
'''
THESIS: above here, I've just (1) loaded in files (image, filtered image, star segmentation, and mask segmentation) and threshold values
THESIS: (2) masked regions of obvious saturation: (a) with 60 contiguous pixels above saturation and (b) very large regions near saturation that have an s2 shape in them, then dilate (a) and (b)
'''
#get cosmics and remove the ones that overlap with the stars (these will be replaced later, but I don't want them to be blended!)
CRseg0=asarray(imagetools.GetImage(CR_segfl),dtype=int)
CRll_for_loop=arange(CRseg0.max())+1
CRll=CRll_for_loop.tolist()
CRslices=scipy.ndimage.find_objects(CRseg0)
CRoverlapSTAR=zeros(CRseg0.shape,dtype=bool) #these are almost entirely saturation spikes!
CRoverlapSTAR_Ncosmics_mask_at_end=0
CRoverlapSTAR_Npixels_mask_at_end=0
for l in CRll_for_loop:
CRsl=CRslices[l-1]
CRspots=CRseg0[CRsl]==l
CR_on_star_frac=starbools[CRsl][CRspots].mean()
if CR_on_star_frac>0:
#test if it is a major hit or a minor hit
if CR_on_star_frac<0.5:
CRsl2=imagetools.slice_expand(CRsl,1)
STARspots=starbools[CRsl2]
STARspots2=scipy.ndimage.binary_dilation(STARspots,conn8)
CRspots2=CRseg0[CRsl2]==l
CR_on_dilated_star_frac=STARspots2[CRspots2].mean()
if CR_on_dilated_star_frac<0.5: #if it's a minor hit, then remove the overlap and continue
overlap=CRspots2*STARspots2
CRseg0[CRsl2][overlap]=0
continue
#always remove a major hit from list of CRs
CRll.remove(l)
CRseg0[CRsl][CRspots]=0
if CRspots.sum()>9: #if big enough, then remove it later
CRoverlapSTAR_Ncosmics_mask_at_end+=1
CRoverlapSTAR_Npixels_mask_at_end+=CRspots.sum()
CRoverlapSTAR[CRsl][CRspots]=1
CRll=asarray(CRll)
'''
THESIS: now I've removed the CR masks on top of stars (almost entirely saturation spikes!)
'''
#get the info needed to define the blender function
#start saving output
compare_dir='/nfs/slac/g/ki/ki18/anja/SUBARU/eyes/CRNitschke_output/data_SCIENCE_compare/'
detections0=CRseg0>0
WOblendCRfiltimage=CRfiltimage.copy()
WOblendCRfiltimage[detections0]=-2000
#save original file
hdu=astropy.io.fits.PrimaryHDU(image)
hdu.header=CRfiltheader
fl_original=compare_dir+'BBout_ORIGINAL_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_original,overwrite=True)
files2check.append(fl_original)
print 'OUTPUT FILES: original (ie. pre-blending) unfiltered image saved as: ',fl_original
#save old CR mask file
hdu=astropy.io.fits.PrimaryHDU(WOblendCRfiltimage)
hdu.header=CRfiltheader
fl_woblend=compare_dir+'BBout_WOblend_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_woblend,overwrite=True)
files2check.append(fl_woblend)
print 'OUTPUT FILES: original (ie. pre-blending) filtered image saved as: ',fl_woblend
#END: iter0
#START: iter1
t1=time.time()
#iter1: run the blender!
bthresh1_tag=('bthresh%.3i' % (bthresh1))
print 'bthresh1_tag=',bthresh1_tag
CRblended1=blocked_blender(bthresh1,CRfiltimage,CRll,CRslices,starbools,CRseg0.copy())
BBCRmask=CRblended1>0
print "Masked",float((BBCRmask).sum())/detections0.sum(),"times the number of original pixels"
BBCRseg,BBCR_Nlabels=scipy.ndimage.label(BBCRmask,conn8)
BBCRslices_b4=scipy.ndimage.find_objects(BBCRseg)
BBCRlabels=arange(BBCR_Nlabels)+1
'''
THESIS: ran first iteration of blocked-blender, re-labeled, segmented, and sliced iter1 results
'''
#get the number of holes in each detection
BBCRslices=[]
Nholefilledpixels=[]
BBCR_hit_spike=[]
for l,sl in zip(BBCRlabels,BBCRslices_b4):
spots=BBCRseg[sl]==l
Nholefilledpixels.append(count_hole_filled_pixels(spots))
sl3=imagetools.slice_expand(sl,3)
BBCRslices.append(sl3)
BBCR_hit_spike.append(sat_spike_bools[sl][spots].any())
Nholefilledpixels=asarray(Nholefilledpixels)
BBCR_hit_spike=asarray(BBCR_hit_spike)
BBCRregs=skimage.measure.regionprops(BBCRseg)
area=array([BBCRregs[i].area for i in range(BBCR_Nlabels)])
'''
THESIS: measured the size (area), the # of hole-pixels(Nholefilledpixels), and if it hit a spike (BBCR_hit_spike), for each region
'''
area_cut=8
holefilledpixels_cut=5
rr_iterN_cut=3.1
open_cut=11
open_rr_cut=0.8
#get rid of masks that are just big blobs near saturation spikes
BBCRll_spike_overlaps=BBCRlabels[BBCR_hit_spike]
consider_spikes=len(BBCRll_spike_overlaps)
if consider_spikes:
spike_overlap_fail_area_cut=area[BBCRll_spike_overlaps-1]<=area_cut #if area<area_cut, then give it a free pass
spike_overlap_reset={} #True=>reset to original form #False=>keep as is. Either way don't include in iter2 and beyond
spike_overlap_stats={}
for l_spike_fail in BBCRll_spike_overlaps[spike_overlap_fail_area_cut]:
spike_overlap_reset[l_spike_fail]=False
spike_overlap_stats[l_spike_fail]="KEEP: It's small, just keep it as is."
if PLOT_ON_OFF_SHOW:
print 'showing plots from first blocked_blender run'
show()
#iter1: select the masks big enough to be able to fail cuts
hole_cuts=Nholefilledpixels>holefilledpixels_cut
big_enough=area>area_cut
area2polyfit=area[big_enough]
BBCRlabels2polyfit=BBCRlabels[big_enough]
'''
 THESIS: picked out masks which are large enough that they might fail cuts later (masks with area<=8 get only one iteration)
'''
#iter1: find detections from iter1 that fail the polynomial fit cut (add to list of bad labels if poly doesn't fit well)
cut_labels2=[];cut_details2=[]
########count=0
for i,(k,size_k) in enumerate(zip(BBCRlabels2polyfit,area2polyfit)):
########Nax= i % 9 + 1
########if Nax==1:
######## count+=1
######## if i!=0:
######## f=imagetools.AxesStripText(f,allticks=True,titles=False)
######## f.savefig(plotdir+'pltRevise%s_bad_labels-polyfit_num%.3i' % (OFB,count))
######## close(f);del f
######## f=figure(figsize=(14,14))
########ax=f.add_subplot(3,3,Nax)
sl=BBCRslices[k-1]
cosmics=BBCRseg[sl]==k
########stamp=image[sl]
########ax,rr_k=cosmicpoly(k,cosmics,stamp,ax)
rr_k,poly_k,polytype_k=polyfitter(cosmics,degree=5)
open8_cosmics=scipy.ndimage.binary_opening(cosmics,conn8)
open8_Nspots=open8_cosmics.sum()
open8_frac=float(open8_Nspots)/size_k
if k in BBCRll_spike_overlaps:
if open8_frac>.2:
spike_overlap_reset[k]=True
spike_overlap_stats[k]=("RESET: (fyi rr=%.1f>%.1f) open8_frac=%.2f>.2" % (rr_k,rr_iterN_cut,open8_frac))
else:
spike_overlap_reset[k]=False
spike_overlap_stats[k]=("KEEP: (fyi rr=%.1f>%.1f) open8_frac=%.2f<.2" % (rr_k,rr_iterN_cut,open8_frac))
elif rr_k>rr_iterN_cut and open8_frac>.03:
cut_labels2.append(k)
cut_details2.append("rr=%.2f>%.2f and open8_frac=%.3f>.03" % (rr_k,rr_iterN_cut,open8_frac))
elif open8_Nspots>open_cut:
openS_cosmics=scipy.ndimage.binary_opening(cosmics,connS)
openS_Nspots=openS_cosmics.sum()
if openS_Nspots>open_cut and rr_k>open_rr_cut:
cut_labels2.append(k)
cut_details2.append("sum(S)=%s>%s sum(8)=%s>%s & rr=%.2f>%.2f" % (openS_Nspots,open_cut,open8_Nspots,open_cut,rr_k,open_rr_cut))
########ax.set_title(ax.get_title().replace('residual/#points','rr')+'\nsum(S)=%s sum(8)=%s' % (openS_Nspots,open8_Nspots),size=10.5)
'''
 THESIS: masks will have their threshold raised if (the degree-5 poly fit has residual/#points rr>3.1) AND (open8_frac>.03)
 THESIS: masks will also have their threshold raised if (rr>0.8) AND (# open8 spots>11) AND (# openS spots>11)
 THESIS: masks with >5 pixels in a hole will be sent to the ringer function
'''
########else:
######## f=imagetools.AxesStripText(f,allticks=True,titles=False)
######## f.savefig(plotdir+'pltRevise%s_bad_labels-polyfit_num%.3i' % (OFB,count))
######## close(f);del f
if consider_spikes:
if PLOT_ON_OFF:
f=plotlabels(spike_overlap_stats.keys(),params=spike_overlap_stats.values())
f.suptitle('before')
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.text(.003,.004,'Spike Overlap Masks, after iter1 (before resetting).')
f.savefig(plotdir+'pltSatSpikes%s-1before' % (OFB,))
if PLOT_ON_OFF_SHOW:pass # show before and after together
else: close(f);del f
results_spike_reset=array(spike_overlap_reset.values())
ll_spikes=array(spike_overlap_reset.keys())
ll_spike_reset=ll_spikes[results_spike_reset]
BBCRseg=reset_labels(ll_spike_reset,BBCRseg)
if PLOT_ON_OFF:
f=plotlabels(spike_overlap_stats.keys(),params=spike_overlap_stats.values())
f.suptitle('after')
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.text(.003,.004,'Spike Overlap Masks, after the reset.')
f.savefig(plotdir+'pltSatSpikes%s-2after' % (OFB,))
if PLOT_ON_OFF_SHOW:
print 'Spike Overlap Masks!'
show()
else: close(f);del f
#iter1: find detections from iter1 that fail the number of filled pixels cut
fillSET=set(BBCRlabels[hole_cuts])
# remove from those the spike resets, since I'm done with those now.
fillLL=array(list(fillSET.difference(ll_spike_reset)))
else:
fillLL=BBCRlabels[hole_cuts]
try:
fillLL_Nholes_filled=Nholefilledpixels[fillLL-1]
except IndexError:
fillLL_Nholes_filled=0
for l in fillLL:
if l in cut_labels2:
ind=cut_labels2.index(l)
cut_labels2.pop(ind)
cut_details2.pop(ind)
'''
 THESIS: after iter1, we handle big blobs near saturation spikes. If the mask's open8_frac>0.2, then it is reset to its pre-blended size. If open8_frac<0.2, then we keep the mask as it is, but don't include it in any later iterN of BB.
'''
if PLOT_ON_OFF:
params=['label=%s #holes=%s' % (hole_l, hole_N) for hole_l,hole_N in zip(fillLL,fillLL_Nholes_filled)]
f=plotlabels(fillLL,params=params)
f.suptitle('UNFILTERED')
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_1before-unfiltered' % (OFB,))
if PLOT_ON_OFF_SHOW:pass # show filtered and unfiltered together
else: close(f);del f
f=plotlabels(fillLL,params=params,background=CRfiltimage)
f.suptitle('FILTERED')
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_1before-filtered' % (OFB,))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
#iter1: END
#BEGIN: RING
Nring_fixed1=0
for l in fillLL:
sl2=BBCRslices[l-1]
spots_ring=BBCRseg[sl2]==l
if PLOT_ON_OFF: newring,ringstat=ringer(spots_ringer=spots_ring.copy(),l_ringer=l,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
else: newring,ringstat=ringer_noplot(spots_ringer=spots_ring.copy(),l_ringer=l,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
newring_noplot,ringstat_noplot=ringer_noplot(spots_ringer=spots_ring.copy(),l_ringer=l,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
assert((newring==newring_noplot).all())
if ringstat==0:
Nring_fixed1+=1
BBCRseg[sl2][spots_ring]=0
BBCRseg[sl2][newring]=l
else:
cut_labels2.append(l)
cut_details2.append(ringstat)
'''
THESIS: ringer run once, if ringstat is 0, then the ring is fixed
'''
print '%s of potential %s rings have been fixed in first iteration' % (Nring_fixed1, len(fillLL))
#moved the "after" plot to the end
#END: RING
#START: iter2
cut_labels={2:cut_labels2}
cut_details={2:cut_details2}
CRblendeds={1:BBCRseg.copy()}
if bthresh1<25: #for things with bthresh1 very low, make the iterN thing more realistic
bthreshs={2:40.0,3:60.0,4:80.0,5:100.0,6:120.0,7:140.0,8:160.0}
elif bthresh1<60: #this used to always be the setup!
bthreshs={2:60.0,3:80.0,4:100.0,5:120.0,6:140.0,7:160.0,8:180.0}
elif bthresh1<80: #don't want to have bthresh2<bthresh1 ever!
bthreshs={2:80.0,3:100.0,4:120.0,5:140.0,6:160.0,7:180.0,8:200.0}
else:
bthreshs={2:100.0,3:120.0,4:140.0,5:160.0,6:180.0,7:200.0,8:220.0}
iterN_final=max(bthreshs.keys())
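 #ILLUSTRATION (hedged, assumed bthresh1): e.g. bthresh1=50 takes the "elif bthresh1<60" branch above,
 #so the escalation schedule is 60,80,...,180 counts for iterations 2 through 8 and iterN_final=8.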
star_ERASE=zeros(CRseg0.shape,dtype=bool)
antidrop_extras=''
iterN_stats={}
'''
 THESIS: For those masks that fail the aforementioned cuts, we reset them to their initial masks, then blend again using a larger fiducial bthresh, in an iterative process, until they pass those cuts. The thresholds are raised 7 times; at each increment the bthresh is raised by 20 counts. If a mask is carried through all seven iterations, it is accepted regardless of whether it has passed the cuts or not.
THESIS: (adam-SHNT): now, what about the blob cuts, track-stretch, and star cut? Seems like these are the only things that happen in this loop that aren't present in the first loop?
'''
for iterN in range(2,iterN_final+1):
exec "t%s=time.time()" % (iterN)
iterN_stats[iterN]={'DONE-swallowed':0,'DONE-PREVsplit':0,'DONE-Multiple Ringers':0,'NEXT-rr':0,'NEXT-ring failed':0,'NEXT-open':0,'NEXT-rr=nan size>9 open8=0':0,'REMOVED-ERASE':0,'DONE-ERASE FAILED':0,'DONE-PASSED ALL':0}
rr_iterN_cut+=.1
'''
THESIS: reset masks that fail the cuts to the way they were in the beginning
'''
#iter2: take detections from iter1 that fail the cuts and reset them to the way they were at iter0
CRsegN=reset_labels(cut_labels[iterN],CRblendeds[iterN-1]) #reset cut_labels2 from CRblendeds[iterN-1] to CRseg0
bthresh_tag=('bthresh%.3i' % (bthreshs[iterN]))
#iter2: take cut detections from iter1 (which have been reset to iter0) and reblend them at a higher thresh
CRblendeds[iterN]=blocked_blender(bthreshs[iterN],CRfiltimage,cut_labels[iterN],BBCRslices,starbools,CRsegN)
CRblendeds_slices=scipy.ndimage.find_objects(CRblendeds[iterN])
del CRsegN
print "had in iter1: ",(CRblendeds[1]>0).sum()
print "now have in iter"+str(iterN)+": ", (CRblendeds[iterN]>0).sum()
#iter2: plot detections from iter2 and determine if they pass the iter3 cuts or not
count=1;cut_labels[iterN+1]=[];cut_details[iterN+1]=[]
if PLOT_ON_OFF: f=figure(figsize=(22,13.625))
for i,probl in enumerate(cut_labels[iterN]):
title_extras=''
Nax= i % 9 + 1
if Nax==1:
if i!=0:
if PLOT_ON_OFF:
f.suptitle('orange "X" = original masked spots \t white "X" = masked spots when blending at bthresh=%.3i\nblack "o" = masked spots after raising non-poly cuts to bthresh=%.3f' % (bthresh1,bthreshs[iterN],))
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f=imagetools.AxesCompact(f,.1)
f.savefig(plotdir+'pltRevise%s_anti-drop_%s-%slabel_group_num%.3i' % (OFB,bthresh_tag,antidrop_extras,count))
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
f=figure(figsize=(22,13.625))
antidrop_extras=''
count+=1
try:
sl=CRblendeds_slices[probl-1]
except IndexError:
iterN_stats[iterN]['DONE-swallowed']+=1 #adam: hit this once, it'll just say continue and hope this doesn't make problems later! (check ~/my_data/SUBARU/RXJ2129/W-C-RC_2012-07-23/SCIENCE/SUPA7-23/SCIENCE/SUPA013516 _6OCF.fits)
continue
if sl==None: #if this label was swallowed by another label, then skip it!
iterN_stats[iterN]['DONE-swallowed']+=1
continue
#iter2: RING now do the ringer thing!
sl2=imagetools.slice_expand(sl,3)
iterNmask=CRblendeds[iterN][sl2]==probl #change this so plot looks right
if not iterNmask.any(): #if this label was swallowed by another label, then skip it!
iterN_stats[iterN]['DONE-swallowed']+=1
continue
holefilledpixels=count_hole_filled_pixels(iterNmask)
run_ring_bool= holefilledpixels>holefilledpixels_cut
'''
   THESIS: if more than 5 pixels sit in holes, run the ringer; if ringstat==0 it's a good ring, so keep it!
'''
if run_ring_bool:
if PLOT_ON_OFF: newring,ringstat=ringer(spots_ringer=iterNmask.copy(),l_ringer=probl,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
else: newring,ringstat=ringer_noplot(spots_ringer=iterNmask.copy(),l_ringer=probl,filtstamp_ringer=CRfiltimage[sl2],imstamp_ringer=image[sl2],seg0stamp_ringer=CRseg0[sl2],star_stamp=starbools[sl2])
if ringstat==0:
CRblendeds[iterN][sl2][iterNmask]=0
CRblendeds[iterN][sl2][newring]=probl
title_extras+=" Used Ring(stat=0)"
iterNmask=newring
holefilledpixels=count_hole_filled_pixels(iterNmask)
else:
title_extras+=" Used Ring(stat!=0)"
else:ringstat=0
#iter2: get needed info for the BLOB cut!
blended_only_spots= iterNmask.copy()
open8_blended_only_spots=scipy.ndimage.binary_opening(blended_only_spots,conn8)
openS_blended_only_spots=scipy.ndimage.binary_opening(blended_only_spots,connS)
open8_Nspots= open8_blended_only_spots.sum();openS_Nspots=openS_blended_only_spots.sum()
del open8_blended_only_spots,openS_blended_only_spots
#iter2: STRETCH now do the iter_track_stretch thing!
'''
THESIS: this fits a line to `cosmics` and stretches the mask along the line.
THESIS: if the line fit is decent, then it determines if any of the pixels included from the stretching have counts in `CRfiltstamp` above `thresh`.
THESIS: If they do, then those pixels are included in the final mask and it returns `cosmics_final,1`, else it returns `cosmics,0`
THESIS: process is continued iteratively within iter_track_stretch
'''
slE=imagetools.slice_expand(sl,100)
if bthreshs[iterN]>=80:
iterNmaskE=CRblendeds[iterN][slE]==probl
cosmics,stretch_count=iter_track_stretch(iterNmaskE.copy(),CRfiltimage[slE] ,bthreshs[iterN]-20,BASE,probl,starbools[slE],name_extras="_ADloop",rr_per_step=.1,ts_rr_cut=1.0,track_len_cut=13)
if stretch_count:
stretch_pixels=cosmics*logical_not(iterNmaskE)
stretch_unnecessary=(CRblendeds[iterN][slE].copy()>0) * stretch_pixels
print "number of stretched pixels already covered=",stretch_unnecessary.sum()," of total ",stretch_pixels.sum()
stretch_necessary=stretch_pixels * logical_not(stretch_unnecessary)
CRblendeds[iterN][slE][stretch_necessary]=probl
title_extras+=" Used Stretch"
#iter2: do the plotting by using a square slice within the slE slice!
slsq=imagetools.slice_square(scipy.ndimage.find_objects(asarray(CRblendeds[iterN][slE]==probl,dtype=int))[0])
slsq3=imagetools.slice_expand(slsq,3)
stamp=image[slE][slsq3]
iter1mask=BBCRseg[slE][slsq3]==probl
iterNmask_slsq3=CRblendeds[iterN][slE][slsq3]==probl #this is iterNmask, but in the slsq3 form
iter0mask=iterNmask_slsq3*(CRseg0[slE][slsq3]>0)
yy0,xx0=nonzero(iter0mask)
masksize0=len(xx0)
#iter2: determine if iter2 detections pass the iter3 cuts or not
masksize=iterNmask_slsq3.sum()
open8_frac=float(open8_Nspots)/masksize
if PLOT_ON_OFF:
ax=f.add_subplot(3,3,Nax)
yy1,xx1=nonzero(iter1mask)
ax.scatter(xx1,yy1,marker='x',color='w',lw=.5,alpha=.5)
ax,rr_i=cosmicpoly(probl,iterNmask_slsq3,stamp,ax,marker='s',s=40)
if isnan(rr_i):
ax.set_title('label %s: rr=nan' % (probl,))
yyN,xxN=nonzero(iterNmask_slsq3)
ax.imshow(stamp,interpolation='nearest',origin='lower left')
ax.scatter(xxN,yyN,marker='o',edgecolors='k',facecolors='None')
ax.scatter(xx0,yy0,marker='x',color='orange',s=50)
ax.set_ylim(0,slsq3[0].stop-slsq3[0].start);ax.set_xlim(0,slsq3[1].stop-slsq3[1].start)
else:
rr_i,poly_i,polytype_i=polyfitter(iterNmask_slsq3,degree=5)
#START: PREVsplit
'''
THESIS: if you're fragmenting the mask into more pieces this time, then choose the previous one
'''
autopass=False
if not (run_ring_bool and ringstat==0): #if we didn't successfully run the ringer function
#check if the mask has been split into 2 pieces
iterPREVmask_slsq3=CRblendeds[iterN-1][slE][slsq3]==probl #this is iterNmask, but for the N-1 iteration
contig_checkseg,contig_check_NlabelsN=scipy.ndimage.label(iterNmask_slsq3,conn8)
contig_checkseg,contig_check_NlabelsPREV=scipy.ndimage.label(iterPREVmask_slsq3,conn8)
names="iterN=",iterN,"probl=",probl,"contig_check_NlabelsN=",contig_check_NlabelsN,"contig_check_NlabelsPREV=",contig_check_NlabelsPREV
del contig_checkseg
if contig_check_NlabelsN>contig_check_NlabelsPREV: #if label has been split-up take the last one
Nopen8_iterPREVmask_slsq3=scipy.ndimage.binary_opening(iterPREVmask_slsq3,conn8).sum()
Ntotal_iterPREVmask_slsq3=iterPREVmask_slsq3.sum()
open8_frac_PREV=float(Nopen8_iterPREVmask_slsq3)/Ntotal_iterPREVmask_slsq3
if open8_frac<=.3 and open8_frac_PREV<open8_frac+.2:
#open8_iterPREVmask_slsq3=scipy.ndimage.binary_opening(iterPREVmask_slsq3,conn8)
#iterPREV_8less=iterPREVmask_slsq3-open8_iterPREVmask_slsq3
#contig_checkseg,contig_check_NlabelsPREV_8less=scipy.ndimage.label(iterPREV_8less,conn8)
#if contig_check_NlabelsN>contig_check_NlabelsPREV_8less: #if label has been split-up take the last one
iterN_stats[iterN]['DONE-PREVsplit']+=1
CRblendeds[iterN][slE][slsq3][iterPREVmask_slsq3]=probl
iterNmask_slsq3=iterPREVmask_slsq3
if PLOT_ON_OFF:ax.set_title('label=%s DONE!!! PREV declared iterN-1 better!\nI dont want to break this up into more pieces!' % (probl))
print ('label=%s DONE!!! PREV declared iterN-1 better!\nI dont want to break this up into more pieces!' % (probl))
antidrop_extras+='PREVsplit-'
autopass=True
#END: PREVsplit
if not autopass and ((ringstat=="Circle of Cosmics" or ringstat=="none in square pattern") and iterN>=3 and open8_frac<.2):
'''
THESIS: if ringer found a circle of cosmics => DONE
'''
iterN_stats[iterN]['DONE-Multiple Ringers']+=1
more_title_extras="DONE!!! Circle of Cosmics rr=%.2f size=%s sum(8)=%s sum(S)=%s open8_frac=%.2f<.2" % (rr_i, iterNmask_slsq3.sum(), open8_Nspots, openS_Nspots,open8_frac)
antidrop_extras+='CosmicCircle-'
elif not autopass and (open8_frac>.03 and rr_i>rr_iterN_cut):
'''
    THESIS: if more than 3% of the mask's pixels survive the open8 opening and it does not match the degree-5 polynomial well (rr above this iteration's cut), then the mask is a blob, so NEXT
'''
iterN_stats[iterN]['NEXT-rr']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("rr=%.2f>%.2f open8_frac=%.2f>.03" % (rr_i,rr_iterN_cut,open8_frac))
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (ringstat!=0 and holefilledpixels>holefilledpixels_cut):
'''
    THESIS: if the ringer failed and there are still >5 hole pixels => NEXT
'''
iterN_stats[iterN]['NEXT-ring failed']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append(ringstat)
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (open8_Nspots>open_cut and openS_Nspots>open_cut and rr_i>open_rr_cut):
'''
    THESIS: if (# open8 spots>11) AND (# openS spots>11) AND (the degree-5 fit has residual/#points rr>0.8) => NEXT
'''
iterN_stats[iterN]['NEXT-open']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("sum(S)=%s>%s sum(8)=%s>%s rr=%.2f>%.2f" % (openS_Nspots,open_cut,open8_Nspots,open_cut,rr_i,open_rr_cut))
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (isnan(rr_i) and masksize>9 and not open8_Nspots):
iterN_stats[iterN]['NEXT-rr=nan size>9 open8=0']+=1
cut_labels[iterN+1].append(probl)
cut_details[iterN+1].append("rr=nan size>9 open8_Nspots=0")
more_title_extras=('this iter (%s of %s) details: ' % (iterN+1,iterN_final))+cut_details[iterN+1][-1]
elif not autopass and (isnan(rr_i) and masksize>9 and masksize0<3 and open8_frac>.6): #if not autopass and (this is true then it might be a star!
'''
    THESIS: if the fit fails (rr=nan), the mask has >9 pixels while the original iter0 mask has <3, open8_frac>.6, it is all one connected piece, and the peak pixel, 2nd- and 3rd-brightest pixels are all neighbors => ERASE the mask (it is probably a star)!
'''
   #make sure that the current and original masks each form a single 8-connected piece (i.e. neither is split into 2)
contig_checkseg,contig_check_NlabelsN=scipy.ndimage.label(iterNmask_slsq3,conn8)
contig_checkseg,contig_check_Nlabels0=scipy.ndimage.label(iter0mask,conn8)
del contig_checkseg
contig_check_Nlabels=max(contig_check_NlabelsN,contig_check_Nlabels0)
#make sure that the hottest pixel and the 2nd hottest are next to one another and both are 8 connected with the 3rd hottest
stampmax=stamp[iterNmask_slsq3].max()
maxspot=stamp==stampmax
stamp_no_max=stamp.copy();stamp_no_max[maxspot]=0;stamp_no_max[logical_not(iterNmask_slsq3)]=0
maxspot2=stamp_no_max==stamp_no_max.max()
max_and_2nd_next=sum(maxspot2*binary_dilation(maxspot))>0
max_or_2=maxspot2+maxspot
stamp_no_max_or_2=stamp.copy();stamp_no_max_or_2[max_or_2]=0;stamp_no_max_or_2[logical_not(iterNmask_slsq3)]=0
maxspot3=stamp_no_max_or_2==stamp_no_max_or_2.max()
max_and_2nd_next_to_3rd=sum(max_or_2*binary_dilation(maxspot3,conn8))>1
if max_and_2nd_next and max_and_2nd_next_to_3rd and contig_check_Nlabels==1: #if this is true then it might be a star! (this should drastically reduce the "pac-man effect"!)
iterN_stats[iterN]['REMOVED-ERASE']+=1
more_title_extras="ERASED!!! (should be star) rr=nan open8_frac=%.2f>.6 Nlabels=1" % (open8_frac,)
more_title_extras+="\nPASSED all OF: max_and_2nd_next=%s max_and_2nd_next_to_3rd=%s" % (max_and_2nd_next,max_and_2nd_next_to_3rd ) #if this is true then it might be a star! (this should drastically reduce the "pac-man effect"!)
star_ERASE[slE][slsq3][iterNmask_slsq3]=1
else:
iterN_stats[iterN]['DONE-ERASE FAILED']+=1
more_title_extras="DONE!!! Didn't Pass ERASE (not star) rr=nan open8_frac=%.2f>.6" % (open8_frac,)
more_title_extras+="\nFAILED one OF:Nlabels=%s>1 max_and_2nd_next=%s max_and_2nd_next_to_3rd=%s" % (contig_check_Nlabels,max_and_2nd_next,max_and_2nd_next_to_3rd )
else:
'''
THESIS: else it passes and it's removed from the iterative loop raising the thresholds!
'''
iterN_stats[iterN]['DONE-PASSED ALL']+=1
more_title_extras="DONE!!! rr=%.2f iterNmask.sum()=%s sum(8)=%s sum(S)=%s open8_frac=%.2f" % (rr_i, iterNmask_slsq3.sum(), open8_Nspots, openS_Nspots,open8_frac)
#START: PREV
#PREV: check how the mask compares to the old mask!
######## else:
######## #check and see if the deleted part was clean (not conn8,etc.)!
######## PREV_removed=iterPREVmask_slsq3-iterNmask_slsq3
######## PREV_important=PREV_removed-(PREV_removed*mahotas.sobel(iterNmask_slsq3))
######## open8_PREV_important=scipy.ndimage.binary_opening(PREV_important,conn8)
######## N_PREV_important=float(PREV_important.sum())
######## N8_PREV_important=float(open8_PREV_important.sum())
######## open8_frac_PREV=N8_PREV_important/N_PREV_important
######## if open8_frac_PREV<.5 and (N_PREV_important-N8_PREV_important)>3:
######## PREV_good_removal=scipy.ndimage.binary_opening(PREV_important,conn8)+scipy.ndimage.binary_opening(PREV_important,connS)
######## PREV_putback=PREV_important-PREV_good_removal
######## skimage.morphology.remove_small_objects(PREV_putback,3,connectivity=2,in_place=True) #conn8
######## if PREV_putback.sum()>3:
######## PREV_seg,N_PREV_segs=scipy.ndimage.label(PREV_putback,conn8)
######## around_iterN=scipy.ndimage.binary_dilation(iterNmask_slsq3,conn8)
######## PREV_segs_nearby=unique(PREV_seg[around_iterN]).tolist()
######## try:PREV_segs_nearby.remove(0)
######## except ValueError:pass
######## if PREV_segs_nearby:
######## Nmask_old=iterNmask_slsq3.copy()
######## add_xx,add_yy=[],[]
######## for PREV_l in PREV_segs_nearby:
######## add_l=PREV_seg==PREV_l
######## l_yy,l_xx=nonzero(add_l)
######## add_xx+=l_xx.tolist()
######## add_yy+=l_yy.tolist()
######## iterNmask_slsq3[add_l]=True
######## print "added %s to label=%s" % (add_l.sum(),probl)
######## #now add the labels from #PREV_segs_nearby to the mask
######## f_PREV=figure(figsize=(10,12))
######## ax_PREV=f_PREV.add_subplot(111)
######## ax_PREV.imshow(stamp,interpolation='nearest',origin='lower left')
######## yy_PREV,xx_PREV=nonzero(Nmask_old)
######## ax_PREV.scatter(xx_PREV,yy_PREV,marker='x',s=60,edgecolors='w',facecolors='none',label='Nmask_old')
######## ax_PREV.scatter(add_xx,add_yy,s=70,marker='x',edgecolors='purple',facecolors='none',label='actually put back')
######## #yy_PREV,xx_PREV=nonzero(PREV_important)
######## #scatter(xx_PREV,yy_PREV,marker='x',edgecolors='w',facecolors='none',label='PREV_important')
######## yy_PREV,xx_PREV=nonzero(PREV_good_removal)
######## ax_PREV.scatter(xx_PREV,yy_PREV,marker='o',edgecolors='k',facecolors='k',label='PREV_good_removal')
######## yy_PREV,xx_PREV=nonzero(PREV_putback)
######## ax_PREV.scatter(xx_PREV,yy_PREV,s=70,marker='s',edgecolors='purple',facecolors='none',label='PREV_putback')
######## legend()
######## f_PREV.suptitle('pltRevise%s_PREV-%s-label%.4i' % (OFB,bthresh_tag,probl))
######## f_PREV=imagetools.AxesCompact(f_PREV,.1)
######## f_PREV.savefig(plotdir+'pltRevise%s_PREV-%s-label%.4i' % (OFB,bthresh_tag,probl))
######## antidrop_extras+='PREVputback-'
#END: PREV
if PLOT_ON_OFF: ax.set_title(ax.get_title()+'\n'+more_title_extras+title_extras+('\nlast iter (%s of %s) details: ' % (iterN,iterN_final))+cut_details[iterN][i],size=10)
if PLOT_ON_OFF:
f.suptitle('orange "X" = original masked spots \t white "X" = masked spots when blending at bthresh=%.3i\nblack "o" = masked spots after raising non-poly cuts to bthresh=%.3f' % (bthresh1,bthreshs[iterN],))
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f=imagetools.AxesCompact(f,.1)
f.savefig(plotdir+'pltRevise%s_anti-drop_%s-%slabel_group_num%.3i' % (OFB,bthresh_tag,antidrop_extras,count))
antidrop_extras=''
if PLOT_ON_OFF_SHOW:pass
else: close(f);del f
#ERASE the removed stars
CRblendeds[iterN_final][star_ERASE]=0
#iter2: this is it, all I need to do is to reset anything that's filled. Just to be safe
BBCRblend_comparable=CRblendeds[iterN_final].copy()
BBCRblend_comparable=asarray(BBCRblend_comparable,dtype=int)
#Save Erased Stars
hdu=astropy.io.fits.PrimaryHDU(asarray(star_ERASE,dtype=int))
hdu.header=CRfiltheader
fl_erase=compare_dir+'BB_ERASED_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_erase,overwrite=True)
files2check.append(fl_erase)
#RING: plot rings, the "after" version
if PLOT_ON_OFF:
f=plotlabels(fillLL,segments=BBCRblend_comparable,slices=BBCRslices,params=params)
f.savefig(plotdir+'pltRevise%s_bad_labels-holes_2after' % (OFB,))
if PLOT_ON_OFF_SHOW:show()
else: close(f);del f
#END: iter2
'''
THESIS-LastStretch: fit a line for each cosmic and connect any close co-linear tracks
'''
#START: LastStretch
tLS=time.time()
#last step should be to fit a line for each cosmic and connect any close co-linear tracks
LastStretchmask=CRblendeds[iterN_final].copy()>0
LastStretchseg,LastStretch_Nlabels=scipy.ndimage.label(LastStretchmask,conn8)
LastStretchslices=scipy.ndimage.find_objects(LastStretchseg)
LastStretchregs=skimage.measure.regionprops(LastStretchseg)
LastStretcharea=array([LastStretchregs[i].area for i in range(LastStretch_Nlabels)])
LastStretchlabels=arange(1,LastStretch_Nlabels+1)
BIGll=LastStretchlabels[LastStretcharea>6]
LastStretch_rr_cut=1.8
LastStretch_Ncosmics_added=0
LastStretch_Npixels_added=[]
for l in BIGll:
sl_l=imagetools.slice_expand(LastStretchslices[l-1],20)
seg_l=LastStretchseg[sl_l]
spots=seg_l==l
yy_lin,xx_lin=nonzero(spots)
track_length=sqrt((xx_lin.max()-xx_lin.min())**2+(yy_lin.max()-yy_lin.min())**2)
xx_plot=arange(xx_lin.min(),xx_lin.max(),.1)
yy_plot=arange(yy_lin.min(),yy_lin.max(),.1)
rr,poly,polytype=polyfitter(spots,1)
if rr<LastStretch_rr_cut and track_length>5:
ayy,axx=nonzero((seg_l>0)*logical_not(spots))
if polytype=='x_of_y':
aX=poly(ayy)
aOffsets=(axx-aX).__abs__()
elif polytype=='y_of_x':
aY=poly(axx)
aOffsets=(ayy-aY).__abs__()
extend_track_spots=aOffsets<LastStretch_rr_cut
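    #ILLUSTRATION (hedged, made-up numbers): if the degree-1 fit is x=2*y+3 (polytype 'x_of_y'), a pixel of a
    #nearby mask at (y=10,x=24) has offset |24-poly(10)|=|24-23|=1<1.8, so it is treated as co-linear and
    #feeds the stretch below.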
Npixels=extend_track_spots.sum()
if Npixels:
LastStretch_Ncosmics_added+=1
LastStretch_Npixels_added.append(Npixels)
star_stamp=starbools[sl_l]
stretched_spots,stretch_count=iter_track_stretch(spots.copy(),CRfiltimage[sl_l] ,dt_times_pt01,BASE,l,star_stamp,name_extras='_LastStretch',ts_rr_cut=1.8,rr_per_step=.2)
fill_these=LastStretchseg[sl_l][stretched_spots]==0
LastStretchseg[sl_l][stretched_spots][fill_these]=l
LastStretch_Npixels_added=asarray(LastStretch_Npixels_added)
#END: LastStretch
#START: CR/star overlap
'''
THESIS-LastStretch: remove any pixels that might have gone through stars in LastStretch!
'''
tOverlap=time.time()
#setup final masks which include the CR/star overlap
BBCRmask_final=LastStretchseg>0
BBCRmask_final[CRoverlapSTAR]=True #put spots where CRseg0 and starseg overlap back into final mask (this will mainly include more saturation spikes)
BBCRimage_final=image.copy()
BBCRimage_final[BBCRmask_final]=0 #CRs=0 and CRseg0/starseg overlap=0 too
 #plot this to make sure I'm not making an awful mistake
BBCRimage_plot_comp=image.copy()
BBCRimage_plot_comp[LastStretchseg>0]=0 #just the CRs=0
if PLOT_ON_OFF:
f=imagetools.ImageWithSpots([BBCRimage_plot_comp,BBCRimage_final],name1='image with masks from before the CR-star overlap was replaced', name2='image with CR-star overlap masked',mode='alpha')
f.savefig(plotdir+'pltRevise%s_CR-star_overlap' % (OFB,))
close(f);del f
#END: CR/star overlap
#START: 400
t400=time.time()
#now add on the stuff that you only pick-up with a very low threshold (mainly for the low seeing objects)
CR_filtfl_ft400=CR_filtfl.replace('_CRN-cosmics','_FT400_CRN-cosmics')
CRfilt_ft400=imagetools.GetImage(CR_filtfl_ft400)
BBCRmask_final_copy=BBCRmask_final.copy()
CR400=CRfilt_ft400>400
CRseg_400_start,CR_400_Nlabels=scipy.ndimage.label(CR400,conn8)
CRslices_400=scipy.ndimage.find_objects(CRseg_400_start)
CRregs_400=skimage.measure.regionprops(CRseg_400_start,intensity_image=CRfilt_ft400)
maxval_400=array([CRregs_400[i].max_intensity for i in range(CR_400_Nlabels)])
eccentricity_400=array([CRregs_400[i].eccentricity for i in range(CR_400_Nlabels)])
area_400=array([CRregs_400[i].area for i in range(CR_400_Nlabels)])
CRll_400=arange(CR_400_Nlabels)+1
ok_label_400=[]
s2t_400=[]
BBCR_frac_400=[]
for l,size_l in zip(CRll_400,area_400):
sl=imagetools.slice_expand(CRslices_400[l-1],2)
spots=CRseg_400_start[sl]==l
sl2_height,sl2_width=spots.shape
yy,xx=nonzero(spots)
spots_beside_track=scipy.ndimage.binary_dilation(spots,conn4)*logical_not(spots)
beside_track_mean=(image[sl][spots_beside_track]-back_im).mean()
track_mean=(image[sl][spots]-back_im).mean()
side2track_ratio=beside_track_mean/track_mean
s2t_400.append(side2track_ratio)
BBCR_frac_400.append(BBCRmask_final_copy[sl][spots].mean())
if sl2_width<6 and sl2_height>200 and (sl2_height/sl2_width)>25:ok_label_400.append(False) #get rid of saturation spikes
elif starbools[sl][spots].any():ok_label_400.append(False)
elif (xx==xx[0]).all():ok_label_400.append(False)#get rid of str8 up and down stuff!
else:ok_label_400.append(True)
BBCR_frac_400=array(BBCR_frac_400)
s2t_400=array(s2t_400)
ok_label_400=array(ok_label_400)
s2t_400_cutval=.33 #was .4
eccentricity_400_cutval=.88 #was .88
area_400_cutval=5 #was 6
maxval_400_cutval=2000.0 #was 2000
standard_cut_400=ok_label_400*(s2t_400<s2t_400_cutval)*(eccentricity_400>eccentricity_400_cutval)*(area_400>area_400_cutval)*(maxval_400>maxval_400_cutval)
fives_cut_400=ok_label_400*(eccentricity_400>.91)*(area_400==5)*(maxval_400>3500)*(s2t_400<s2t_400_cutval) #was without s2t cut
fours_cut_400=ok_label_400*(eccentricity_400>.95)*(area_400==4)*(maxval_400>3500)*(s2t_400<s2t_400_cutval) #was without s2t cut
all_cut_400=standard_cut_400+fives_cut_400+fours_cut_400#+brighter_circular_cut_400
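 #ILLUSTRATION (hedged, made-up numbers): a faint streak with s2t=0.20, eccentricity=0.95, area=9 and
 #max=2600 passes standard_cut_400 (all of ok_label_400, s2t<.33, e>.88, area>5, max>2000 hold), while a
 #round galaxy core with s2t~0.6 fails the side-to-track ratio even if it is bright and extended.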
CRseg_400_final=CRseg_400_start.copy()
for l in CRll_400[logical_not(all_cut_400)]:
sl=CRslices_400[l-1]
spots=CRseg_400_final[sl]==l
CRseg_400_final[sl][spots]=0
for l in CRll_400[all_cut_400]:
sl=CRslices_400[l-1]
sl_l=imagetools.slice_expand(sl,25)
spots=CRseg_400_final[sl_l]==l
star_stamp=starbools[sl_l]
try:stretched_spots,stretch_count=iter_track_stretch(spots.copy(),CRfilt_ft400[sl_l] ,dt_times_pt01*2,BASE,l,star_stamp,name_extras='_400',rr_per_step=.25)
except ValueError:continue
if stretch_count:
BBCR_frac_l=BBCRmask_final_copy[sl_l][stretched_spots].mean()
if BBCR_frac_l<BBCR_frac_400[l-1]: #only update things if it's better
BBCR_frac_400[l-1]=BBCR_frac_l
CRseg_400_final[sl_l][stretched_spots]=l
CRslices_400[l-1]=sl_l
#params=["e=%.2f max=%.1f frac_done=%.2f\ns2t=%.2f (.35 cut)" % (ecc,maxval,fc,s2t) for ecc, maxval,fc,s2t in zip(eccentricity_400,maxval_400,BBCR_frac_400,s2t_400)]
params=["e=%.2f max=%.1f\ns2t=%.2f (<.33)" % (ecc,maxval,s2t) for ecc,maxval,s2t in zip(eccentricity_400,maxval_400,s2t_400)]
tryitllS=CRll_400[standard_cut_400*(BBCR_frac_400<.9)]
if len(tryitllS) and PLOT_ON_OFF:
f=plotlabels(tryitllS,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_standard_cut_400' % (OFB,))
close(f);del f
#GalFix# does the s2t cut do anything?
standard_cut_400_NOT_s2t=ok_label_400*(s2t_400>s2t_400_cutval)*(eccentricity_400>eccentricity_400_cutval)*(area_400>area_400_cutval)*(maxval_400>maxval_400_cutval)
tryitllS_NOT_s2t=CRll_400[standard_cut_400_NOT_s2t*(BBCR_frac_400<.9)]
if len(tryitllS_NOT_s2t) and PLOT_ON_OFF:
f=plotlabels(tryitllS_NOT_s2t,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_standard_cut_400_NOT_s2t' % (OFB,))
close(f);del f
tryit=fives_cut_400*logical_not(standard_cut_400)*(BBCR_frac_400<.9)
tryitll5=CRll_400[tryit]
if len(tryitll5) and PLOT_ON_OFF:
f=plotlabels(tryitll5,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_fives_cut_400' % (OFB,))
close(f);del f
tryit=fours_cut_400*logical_not(standard_cut_400)*(BBCR_frac_400<.9)
tryitll4=CRll_400[tryit]
if len(tryitll4) and PLOT_ON_OFF:
f=plotlabels(tryitll4,segments=CRseg_400_final,slices=CRslices_400,params=params,background=image)
f=imagetools.AxesStripText(f,allticks=True,titles=False)
f.savefig(plotdir+'pltRevise%s_extras_fours_cut_400' % (OFB,))
close(f);del f
ll_400_final=tryitll4.tolist()+tryitll5.tolist()+tryitllS.tolist()
totally_new_400=0
for l in ll_400_final:
fc=BBCR_frac_400[l-1]
if fc==0.0: totally_new_400+=1
#END: 400
#START: save results
tsave=time.time()
FINALmask=BBCRmask_final.copy()
for l in ll_400_final:
sl=CRslices_400[l-1]
spots=CRseg_400_final[sl]==l
FINALmask[sl][spots]=True
FINALimage=image.copy()
FINALimage[FINALmask]=0 #CRs=0 and CRseg0/starseg overlap=0 too
FINALseg,FINAL_Nlabels=scipy.ndimage.label(FINALmask,conn8)
hdu=astropy.io.fits.PrimaryHDU(FINALimage)
hdu.header=CRfiltheader
fl_revised=compare_dir+'BBrevised_'+bthresh1_tag+'_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE)
hdu.writeto(fl_revised,overwrite=True)
files2check.append(fl_revised)
#files2check.append(compare_dir+'BBrevised_bfrac0pt0100_BBCR_%s_%s.%s.fits' % (OBJECT,FILTER,BASE))
#save output mask for bonnpipeline code
CR_newsegfl=CR_segfl.replace('SEGMENTATION_CRN-cosmics','SEGMENTATION_BB_CRN-cosmics')
hdu=astropy.io.fits.PrimaryHDU(FINALseg)
hdu.header=CRfiltheader
hdu.writeto(CR_newsegfl ,overwrite=True)
tend=time.time()
#END: save results
#START: print stats!
times_start=asarray([t0, t1, t2, t3, t4, t5, t6, t7, t8, tLS, tOverlap, t400, tsave, tend])
things=['iter0','iter1','iter2','iter3','iter4','iter5','iter6','iter7','iter8','LastStretch','CRonSTAR','FT400','SAVE']
times_took=(times_start[1:]-times_start[:-1])/60.0
time_total=(tend-t0)/60.0
time_percent=times_took/time_total*100
thing_times=[str(round(tt,2)) for tt in times_took]
thing_time_percent=["("+str(round(tt,0))+"%)" for tt in time_percent]
end_str_print=''
#**set PLOT_ON_OFF=1**
BBstat_str="|***$$$~~~: "+"BB stats for the file="+fl+" :***$$$~~~|"
BBstat_len=len(BBstat_str)-2
BBstat_details="|***$$$~~~: MYSEEING=%.2f EXPTIME=%i RMS=%.2f " % (header['MYSEEING'],header['EXPTIME'],rms)
nl= BBstat_details+" %"+str(BBstat_len-len(BBstat_details)-10)+"s"
detections1=BBCRseg>0
end_str_print+= "\n"+"|"+"-"*BBstat_len+"|"+"\n"+BBstat_str+"\n"+nl % (" ")+":***$$$~~~|"+"\n|"+"-"*BBstat_len+"|"
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| iter0| # cosmics before blending :"+ str(CRseg0.max())
end_str_print+= "\n"+"| iter0| # masked pixels before blending :"+ str(detections0.sum())+ " %="+ str(detections0.mean())
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| iter1| # cosmics after blending #1 :"+ str(BBCRseg.max())
end_str_print+= "\n"+"| iter1| # masked pixels after blending #1 :"+ str(detections1.sum())+" %="+ str(detections1.mean())
end_str_print+= "\n"+"| iter1| # that are big enough (area>8) to be considered in raise thresh cut:"+ str(big_enough.sum())+" %="+ str( big_enough.mean())
end_str_print+= "\n"+"| iter1| # with large holes that will be sent to raise thresh cut:"+ str(hole_cuts.sum())+ " of those this many were fixed:"+ str(Nring_fixed1)
end_str_print+= "\n"+"| iter1| # with bad rr not great rr and open8 and openS ringer failed (i.e. masks considered in iterN):"+ str(len(cut_labels2))
end_str_print+= "\n|\n| iterN| iterations 2 thru 8 "
done_keys=asarray(["DONE-Multiple Ringers", "DONE-swallowed", "DONE-PASSED ALL", "DONE-PREVsplit", "DONE-ERASE FAILED"])
next_keys=asarray([ "NEXT-open", "NEXT-ring failed", "NEXT-rr", "NEXT-rr=nan size>9 open8=0"])
iterN_stats_all={"DONE-swallowed":0,"DONE-PREVsplit":0,"DONE-Multiple Ringers":0,"NEXT-rr":0,"NEXT-ring failed":0,"NEXT-open":0,"NEXT-rr=nan size>9 open8=0":0,"REMOVED-ERASE":0,"DONE-ERASE FAILED":0,"DONE-PASSED ALL":0}
done_all=0; next_all=0
for iterN in range(2,iterN_final+1):
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
done=0; next=0
for key in sort(iterN_stats[iterN].keys()):
iterN_stats_all[key]+=iterN_stats[iterN][key]
if 'DONE' in key: done+=iterN_stats[iterN][key]
if 'NEXT' in key: next+=iterN_stats[iterN][key]
done_all+=done;next_all+=next
done_str_total="| iter%s| %s DONE: " % (iterN,done)
removed_str_total="| iter%s| %s REMOVED-ERASED STAR CANDIDATES " % (iterN,iterN_stats[iterN]['REMOVED-ERASE'])
next_str_total="| iter%s| %s NEXT: " % (iterN,next)
done_vals=asarray([iterN_stats[iterN][key] for key in done_keys])
next_vals=asarray([iterN_stats[iterN][key] for key in next_keys])
done_str_pieces=["("+str(i+1)+": "+dk.replace("DONE-","")+") == "+str(iterN_stats[iterN][dk]) for i,dk in enumerate(done_keys[done_vals.argsort()[::-1]])]
done_str=done_str_total+" ".join(done_str_pieces)
next_str_pieces=["("+str(i+1)+": "+dk.replace("NEXT-","")+") == "+str(iterN_stats[iterN][dk]) for i,dk in enumerate(next_keys[next_vals.argsort()[::-1]])]
next_str=next_str_total+" ".join(next_str_pieces)
end_str_print+= "\n|"+next_time_line+"|\n"+done_str
end_str_print+= "\n"+next_str
end_str_print+= "\n"+removed_str_total
else:
end_str_print+= "\n|\n| iterN| iterations 2 thru 8 totals (NEXT stats aren't all that meaningful here)"
done_str_total="| iter%s| %s DONE: " % ("N",done_all)
removed_str_total="| iter%s| %s REMOVED-ERASED STAR CANDIDATES " % ("N",iterN_stats_all["REMOVED-ERASE"])
next_str_total="| iter%s| %s NEXT: " % ("N",next_all)
done_vals=asarray([iterN_stats_all[key] for key in done_keys])
next_vals=asarray([iterN_stats_all[key] for key in next_keys])
done_str_pieces=["("+str(i+1)+": "+dk.replace("DONE-","")+") == "+str(iterN_stats_all[dk]) for i,dk in enumerate(done_keys[done_vals.argsort()[::-1]])]
done_str=done_str_total+' '.join(done_str_pieces)
next_str_pieces=["("+str(i+1)+": "+dk.replace("NEXT-","")+") == "+str(iterN_stats_all[dk]) for i,dk in enumerate(next_keys[next_vals.argsort()[::-1]])]
next_str=next_str_total+" ".join(next_str_pieces)
end_str_print+= "\n"+done_str
end_str_print+= "\n"+next_str
end_str_print+= "\n"+removed_str_total
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| LastStretch| Masked a total of this many cosmics: "+ str(LastStretch_Ncosmics_added)
end_str_print+= "\n"+"| LastStretch| of which an average of this # of pixels was added on: "+ str(LastStretch_Npixels_added.mean())
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| CRonSTAR| Masked a total of this many cosmics: "+ str(CRoverlapSTAR_Ncosmics_mask_at_end)
try:
end_str_print+= "\n"+"| CRonSTAR| of which an average of this # of pixels was added on: "+ str(CRoverlapSTAR_Npixels_mask_at_end/CRoverlapSTAR_Ncosmics_mask_at_end)
except ZeroDivisionError:pass
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| FT400| Masked a total of this many cosmics: "+ str(len(ll_400_final))
end_str_print+= "\n"+"| FT400| of which these many were totally new: "+ str(totally_new_400)
next_time_line = (' '.join([things.pop(0),'took',thing_times.pop(0),'minutes',thing_time_percent.pop(0)])).rjust(BBstat_len)
end_str_print+= "\n|"+next_time_line+"|\n| FINAL| Total cosmics+sat spikes masked: "+ str(FINAL_Nlabels)
TOTAL_BBCR=FINAL_Nlabels-CRoverlapSTAR_Ncosmics_mask_at_end
RATE_BBCR=TOTAL_BBCR/header["EXPTIME"]
end_str_print+= "\n"+"| FINAL| Total cosmics masked: "+str(TOTAL_BBCR)
end_str_print+= "\n"+"| FINAL| cosmics masked per second exposed: "+str(RATE_BBCR)
end_str_print+= "\n"+"|"+"-"*BBstat_len+"|"
#asciiable data
end_str_print+= "\n"+"BBSSCR_stats-BB %s %s %.2f %.2f %s %i %.2f %i %i %i %i %i" % (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#ascii_names=["BASE","FILTER","SEEING","RATE_BBCR","TOTAL_BBCR","EXPTIME","RMS","ERASED","TOTAL","CRonSTAR","FT400_new","LastStretch"]
#ascii_vals= (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#end_str_print+= "\n"+"ascii %s\t%s\t%.2f\t%.2f\t%s\t%i\t%.2f\t%i\t%i\t%i\t%i\t%i" % (BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added)
#end_str_print+= "\n"+"\nascii_BB", BASE,header["FILTER"],header["MYSEEING"],RATE_BBCR,TOTAL_BBCR,header["EXPTIME"],rms,iterN_stats_all["REMOVED-ERASE"],FINAL_Nlabels,CRoverlapSTAR_Ncosmics_mask_at_end,totally_new_400, LastStretch_Ncosmics_added
end_str_print+= "\n"+"\nds9 -zscale -tile mode column "+" ".join(files2check)+" -zscale -lock frame image -lock crosshair image -geometry 2000x2000 &"
end_str_print+= "\n"+"\ndone with file="+fl+"\n"+"$"*BBstat_len+"\n\n"
#END: end_str_print+= "\n"+stats!
print end_str_print
| {
"content_hash": "877bd9a2e488fc5c7678373ccdd9dd3e",
"timestamp": "",
"source": "github",
"line_count": 2361,
"max_line_length": 406,
"avg_line_length": 55.090216010165186,
"alnum_prop": 0.6794984162130577,
"repo_name": "deapplegate/wtgpipeline",
"id": "77ae9aa39c96aec1fa206bbea68451ece059b251",
"size": "130091",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "CRNitschke/blocked_blender.2.2.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Awk",
"bytes": "183"
},
{
"name": "C",
"bytes": "7161"
},
{
"name": "C++",
"bytes": "65083"
},
{
"name": "Makefile",
"bytes": "2574"
},
{
"name": "Perl",
"bytes": "38992"
},
{
"name": "Python",
"bytes": "13671330"
},
{
"name": "Roff",
"bytes": "48622"
},
{
"name": "Shell",
"bytes": "3637313"
},
{
"name": "XSLT",
"bytes": "54208"
}
],
"symlink_target": ""
} |
from flask import render_template, request
from app import app
# ROUTING/VIEW FUNCTIONS
@app.route('/')
@app.route('/index')
def index():
# Renders index.html.
return render_template('index.html')
# @app.route('/output')
# def cities_output():
# return render_template('output.html')
@app.route('/author')
def author():
# Renders author.html.
return render_template('author.html')
@app.route('/output',methods=["POST"])
def cities_output():
# get the query (city name)
city_name = request.form['cities']
return render_template('output.html',city_name=city_name)
@app.route('/about')
def about():
return render_template("about.html")
| {
"content_hash": "f469c7776d6866a9b0ad47f3b253d85a",
"timestamp": "",
"source": "github",
"line_count": 29,
"max_line_length": 58,
"avg_line_length": 22.896551724137932,
"alnum_prop": 0.6852409638554217,
"repo_name": "hongsups/insightfl_shin",
"id": "f586124df25cc5ad02d7beea4c01b7b16086d864",
"size": "664",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "6009"
},
{
"name": "HTML",
"bytes": "23877"
},
{
"name": "JavaScript",
"bytes": "18614"
},
{
"name": "Python",
"bytes": "6070"
}
],
"symlink_target": ""
} |
from cumulusci.core.config.sfdx_org_config import SfdxOrgConfig
from typing import Optional
import base64
import enum
import io
import json
import pathlib
import zipfile
from pydantic import BaseModel, validator
from simple_salesforce.exceptions import SalesforceMalformedRequest
from cumulusci.core.exceptions import DependencyLookupError, ServiceNotConfigured
from cumulusci.core.exceptions import PackageUploadFailure
from cumulusci.core.exceptions import TaskOptionsError
from cumulusci.core.utils import process_bool_arg
from cumulusci.salesforce_api.package_zip import BasePackageZipBuilder
from cumulusci.salesforce_api.package_zip import MetadataPackageZipBuilder
from cumulusci.salesforce_api.utils import get_simple_salesforce_connection
from cumulusci.core.sfdx import get_default_devhub_username
from cumulusci.tasks.salesforce.BaseSalesforceApiTask import BaseSalesforceApiTask
from cumulusci.tasks.salesforce.org_settings import build_settings_package
from cumulusci.utils import download_extract_github
class PackageTypeEnum(str, enum.Enum):
managed = "Managed"
unlocked = "Unlocked"
class VersionTypeEnum(str, enum.Enum):
major = "major"
minor = "minor"
patch = "patch"
class PackageConfig(BaseModel):
package_name: str
description: str = ""
package_type: PackageTypeEnum
org_dependent: bool = False
namespace: Optional[str]
version_name: str
version_type: VersionTypeEnum = VersionTypeEnum.minor
@validator("org_dependent")
def org_dependent_must_be_unlocked(cls, v, values):
if v and values["package_type"] != PackageTypeEnum.unlocked:
raise ValueError("Only unlocked packages can be org-dependent.")
return v
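# Hedged, illustrative sketch (not used elsewhere in this module; the values are made up):
# because PackageConfig is a pydantic model, invalid combinations fail at construction time,
# e.g. an org-dependent Managed package is rejected by the validator above.
def _example_package_config_validation():  # pragma: no cover - illustration only
    """Illustrative only: returns the error raised for an org-dependent Managed package."""
    ok = PackageConfig(
        package_name="Example Package",
        package_type=PackageTypeEnum.unlocked,
        org_dependent=True,
        version_name="Example Release",
    )
    assert ok.org_dependent
    try:
        PackageConfig(
            package_name="Example Package",
            package_type=PackageTypeEnum.managed,
            org_dependent=True,
            version_name="Example Release",
        )
    except ValueError as err:  # pydantic's ValidationError subclasses ValueError
        return err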
class CreatePackageVersion(BaseSalesforceApiTask):
"""Creates a new second-generation package version.
If a package named ``package_name`` does not yet exist in the Dev Hub, it will be created.
"""
api_version = "50.0"
task_options = {
"package_name": {"description": "Name of package"},
"package_type": {
"description": "Package type (Unlocked or Managed)",
"required": True,
},
"namespace": {"description": "Package namespace"},
"version_name": {"description": "Version name"},
"version_type": {
"description": "The part of the version number to increment. "
"Options are major, minor, patch. Defaults to minor"
},
"skip_validation": {
"description": "If true, skip validation of the package version. Default: false. "
"Skipping validation creates packages more quickly, but they cannot be promoted for release."
},
"org_dependent": {
"description": "If true, create an org-dependent unlocked package. Default: false."
},
"force_upload": {
"description": "If true, force creating a new package version even if one with the same contents already exists"
},
"static_resource_path": {
"description": "The path where decompressed static resources are stored. Any subdirectories found will be zipped and added to the staticresources directory of the build."
},
}
def _init_options(self, kwargs):
super()._init_options(kwargs)
self.package_config = PackageConfig(
package_name=self.options.get("package_name")
or self.project_config.project__package__name,
package_type=self.options.get("package_type")
or self.project_config.project__package__type,
org_dependent=self.options.get("org_dependent", False),
namespace=self.options.get("namespace")
or self.project_config.project__package__namespace,
version_name=self.options.get("version_name") or "Release",
version_type=self.options.get("version_type") or "minor",
)
self.options["skip_validation"] = process_bool_arg(
self.options.get("skip_validation") or False
)
self.options["force_upload"] = process_bool_arg(
self.options.get("force_upload") or False
)
def _init_task(self):
self.devhub_config = self._init_devhub()
self.tooling = get_simple_salesforce_connection(
self.project_config,
self.devhub_config,
api_version=self.api_version,
base_url="tooling",
)
def _init_devhub(self):
# Determine the devhub username for this project
try:
devhub_service = self.project_config.keychain.get_service("devhub")
except ServiceNotConfigured:
devhub_username = get_default_devhub_username()
else:
devhub_username = devhub_service.username
return SfdxOrgConfig({"username": devhub_username}, "devhub")
def _run_task(self):
"""Creates a new 2GP package version.
1. Create package if not found in Dev Hub.
2. Request creation of package version.
3. Wait for completion.
4. Collect package information as return values.
"""
# find existing package in Dev Hub, or create one if necessary
self.package_id = self._get_or_create_package(self.package_config)
self.return_values["package_id"] = self.package_id
# submit request to create package version
options = {
"package_type": self.package_config.package_type.value,
"namespace_inject": self.package_config.namespace,
"namespaced_org": self.package_config.namespace is not None,
}
if "static_resource_path" in self.options:
options["static_resource_path"] = self.options["static_resource_path"]
package_zip_builder = MetadataPackageZipBuilder(
path=self.project_config.default_package_path,
name=self.package_config.package_name,
options=options,
logger=self.logger,
)
self.request_id = self._create_version_request(
self.package_id,
self.package_config,
package_zip_builder,
self.options["skip_validation"],
)
self.return_values["request_id"] = self.request_id
# wait for request to complete
self._poll()
self.return_values["package2_version_id"] = self.package_version_id
# get the new version number from Package2Version
res = self.tooling.query(
"SELECT MajorVersion, MinorVersion, PatchVersion, BuildNumber, SubscriberPackageVersionId FROM Package2Version WHERE Id='{}' ".format(
self.package_version_id
)
)
package2_version = res["records"][0]
self.return_values["subscriber_package_version_id"] = package2_version[
"SubscriberPackageVersionId"
]
self.return_values["version_number"] = self._get_version_number(
package2_version
)
# get the new version's dependencies from SubscriberPackageVersion
res = self.tooling.query(
"SELECT Dependencies FROM SubscriberPackageVersion "
f"WHERE Id='{package2_version['SubscriberPackageVersionId']}'"
)
self.return_values["dependencies"] = res["records"][0]["Dependencies"]
self.logger.info("Created package version:")
self.logger.info(f" Package2 Id: {self.package_id}")
self.logger.info(f" Package2Version Id: {self.package_version_id}")
self.logger.info(
f" SubscriberPackageVersion Id: {self.return_values['subscriber_package_version_id']}"
)
self.logger.info(f" Version Number: {self.return_values['version_number']}")
self.logger.info(f" Dependencies: {self.return_values['dependencies']}")
def _get_or_create_package(self, package_config: PackageConfig):
"""Find or create the Package2
Checks the Dev Hub for an existing, non-deprecated 2GP package
with matching name, type, and namespace.
"""
message = f"Checking for existing {package_config.package_type} Package named {package_config.package_name}"
query = (
f"SELECT Id, ContainerOptions FROM Package2 WHERE IsDeprecated = FALSE "
f"AND ContainerOptions='{package_config.package_type}' "
f"AND IsOrgDependent={package_config.org_dependent} "
f"AND Name='{package_config.package_name}'"
)
if package_config.namespace:
query += f" AND NamespacePrefix='{package_config.namespace}'"
message += f" with namespace {package_config.namespace}"
else:
query += " AND NamespacePrefix=null"
self.logger.info(message)
try:
res = self.tooling.query(query)
except SalesforceMalformedRequest as err:
if "Object type 'Package2' is not supported" in err.content[0]["message"]:
raise TaskOptionsError(
"This org does not have a Dev Hub with 2nd-generation packaging enabled. "
"Make sure you are using the correct org and/or check the Dev Hub settings in Setup."
)
raise # pragma: no cover
if res["size"] > 1:
raise TaskOptionsError(
f"Found {res['size']} packages with the same name, namespace, and package_type"
)
if res["size"] == 1:
existing_package = res["records"][0]
if existing_package["ContainerOptions"] != package_config.package_type:
raise PackageUploadFailure(
f"Duplicate Package: {existing_package['ContainerOptions']} package with id "
f"{ existing_package['Id']} has the same name ({package_config.package_name}) "
"for this namespace but has a different package type"
)
package_id = existing_package["Id"]
self.logger.info(f"Found {package_id}")
return package_id
self.logger.info("No existing package found, creating the package")
Package2 = self._get_tooling_object("Package2")
package = Package2.create(
{
"ContainerOptions": package_config.package_type,
"IsOrgDependent": package_config.org_dependent,
"Name": package_config.package_name,
"Description": package_config.description,
"NamespacePrefix": package_config.namespace,
}
)
return package["id"]
def _create_version_request(
self,
package_id: str,
package_config: PackageConfig,
package_zip_builder: BasePackageZipBuilder,
skip_validation: bool = False,
dependencies: list = None,
):
# Prepare the VersionInfo file
version_bytes = io.BytesIO()
version_info = zipfile.ZipFile(version_bytes, "w", zipfile.ZIP_DEFLATED)
try:
# Add the package.zip
package_hash = package_zip_builder.as_hash()
version_info.writestr("package.zip", package_zip_builder.as_bytes())
if not self.options["force_upload"]:
# Check for an existing package with the same contents
res = self.tooling.query(
"SELECT Id "
"FROM Package2VersionCreateRequest "
f"WHERE Package2Id = '{package_id}' "
"AND Status != 'Error' "
f"AND SkipValidation = {str(skip_validation)} "
f"AND Tag = 'hash:{package_hash}' "
"ORDER BY CreatedDate DESC"
)
if res["size"] > 0:
self.logger.info(
"Found existing request for package with the same metadata. Using existing package."
)
return res["records"][0]["Id"]
# Create the package descriptor
# @@@ we should support releasing a successor to an older version by specifying a base version
last_version_parts = self._get_highest_version_parts(package_id)
version_number = self._get_next_version_number(
last_version_parts, package_config.version_type
)
package_descriptor = {
"ancestorId": "", # @@@ need to add this for Managed 2gp
"id": package_id,
"path": "",
"versionName": package_config.version_name,
"versionNumber": version_number,
}
# Add org shape
with open(self.org_config.config_file, "r") as f:
scratch_org_def = json.load(f)
for key in (
"country",
"edition",
"language",
"features",
"snapshot",
):
if key in scratch_org_def:
package_descriptor[key] = scratch_org_def[key]
# Add settings
if "settings" in scratch_org_def:
with build_settings_package(
scratch_org_def["settings"], self.api_version
) as path:
settings_zip_builder = MetadataPackageZipBuilder(path=path)
version_info.writestr(
"settings.zip", settings_zip_builder.as_bytes()
)
# Add the dependencies for the package
is_dependency = package_config is not self.package_config
if (
not (package_config.org_dependent or skip_validation)
and not is_dependency
):
self.logger.info("Determining dependencies for package")
dependencies = self._get_dependencies()
if dependencies:
package_descriptor["dependencies"] = dependencies
# Add package descriptor to version info
version_info.writestr(
"package2-descriptor.json", json.dumps(package_descriptor)
)
finally:
version_info.close()
version_info = base64.b64encode(version_bytes.getvalue()).decode("utf-8")
Package2CreateVersionRequest = self._get_tooling_object(
"Package2VersionCreateRequest"
)
request = {
"Package2Id": package_id,
"SkipValidation": skip_validation,
"Tag": f"hash:{package_hash}",
"VersionInfo": version_info,
}
self.logger.info(
f"Requesting creation of package version {version_number} "
f"for package {package_config.package_name} ({package_id})"
)
response = Package2CreateVersionRequest.create(request)
self.logger.info(
f"Package2VersionCreateRequest created with id {response['id']}"
)
return response["id"]
def _get_highest_version_parts(self, package_id):
"""Get the version parts for the highest existing version of the specified package."""
res = self.tooling.query(
"SELECT MajorVersion, MinorVersion, PatchVersion, BuildNumber, IsReleased "
"FROM Package2Version "
f"WHERE Package2Id='{package_id}' "
"ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion DESC, BuildNumber DESC "
"LIMIT 1"
)
if res["size"]:
return res["records"][0]
return {
"MajorVersion": 0,
"MinorVersion": 0,
"PatchVersion": 0,
"BuildNumber": 0,
"IsReleased": False,
}
def _get_next_version_number(self, version_parts, version_type: VersionTypeEnum):
"""Predict the next package version.
Given existing version parts (major/minor/patch) and a version type,
determine the number to request for the next version.
"""
new_version_parts = {
"MajorVersion": version_parts["MajorVersion"],
"MinorVersion": version_parts["MinorVersion"],
"PatchVersion": version_parts["PatchVersion"],
"BuildNumber": "NEXT",
}
if version_parts["IsReleased"]:
if version_type == VersionTypeEnum.major:
new_version_parts["MajorVersion"] += 1
new_version_parts["MinorVersion"] = 0
new_version_parts["PatchVersion"] = 0
            elif version_type == VersionTypeEnum.minor:
new_version_parts["MinorVersion"] += 1
new_version_parts["PatchVersion"] = 0
elif version_type == VersionTypeEnum.patch:
new_version_parts["PatchVersion"] += 1
return self._get_version_number(new_version_parts)
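    # Worked example of the logic above (values illustrative): starting from a
    # released 1.2.0.5, version_type "minor" yields "1.3.0.NEXT", "patch" yields
    # "1.2.1.NEXT", and "major" yields "2.0.0.NEXT"; if the highest version is
    # not yet released, the parts are reused unchanged as "1.2.0.NEXT".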
def _get_version_number(self, version):
"""Format version fields from Package2Version as a version number."""
return "{MajorVersion}.{MinorVersion}.{PatchVersion}.{BuildNumber}".format(
**version
)
def _get_dependencies(self):
"""Resolve dependencies into SubscriberPackageVersionIds (04t prefix)"""
dependencies = self.project_config.get_static_dependencies()
# If any dependencies are expressed as a 1gp namespace + version,
# we need to convert those to 04t package version ids,
# for which we need an org with the packages installed.
if self._has_1gp_namespace_dependency(dependencies):
dependencies = self.org_config.resolve_04t_dependencies(dependencies)
# Convert dependencies to correct format for Package2VersionCreateRequest
dependencies = self._convert_project_dependencies(dependencies)
# Build additional packages for local unpackaged/pre
dependencies = self._get_unpackaged_pre_dependencies(dependencies)
return dependencies
def _has_1gp_namespace_dependency(self, project_dependencies):
"""Returns true if any dependencies are specified using a namespace rather than 04t"""
for dependency in project_dependencies:
if "namespace" in dependency:
return True
if "dependencies" in dependency:
if self._has_1gp_namespace_dependency(dependency["dependencies"]):
return True
return False
def _convert_project_dependencies(self, dependencies):
"""Convert dependencies into the format expected by Package2VersionCreateRequest.
For dependencies expressed as a github repo subfolder, build an unlocked package from that.
"""
new_dependencies = []
for dependency in dependencies:
if dependency.get("dependencies"):
new_dependencies.extend(
self._convert_project_dependencies(dependency["dependencies"])
)
new_dependency = {}
if dependency.get("version_id"):
name = (
f"{dependency['namespace']}@{dependency['version']} "
if "namespace" in dependency
else ""
)
self.logger.info(
f"Adding dependency {name} with id {dependency['version_id']}"
)
new_dependency["subscriberPackageVersionId"] = dependency["version_id"]
elif dependency.get("repo_name"):
version_id = self._create_unlocked_package_from_github(
dependency, new_dependencies
)
self.logger.info(
"Adding dependency {}/{} {} with id {}".format(
dependency["repo_owner"],
dependency["repo_name"],
dependency["subfolder"],
version_id,
)
)
new_dependency["subscriberPackageVersionId"] = version_id
else:
raise DependencyLookupError(
f"Unable to convert dependency: {dependency}"
)
new_dependencies.append(new_dependency)
return new_dependencies
def _get_unpackaged_pre_dependencies(self, dependencies):
"""Create package for unpackaged/pre metadata, if necessary"""
path = pathlib.Path("unpackaged", "pre")
if not path.exists():
return dependencies
for item_path in sorted(path.iterdir(), key=str):
if item_path.is_dir():
version_id = self._create_unlocked_package_from_local(
item_path, dependencies
)
self.logger.info(
"Adding dependency {}/{} {} with id {}".format(
self.project_config.repo_owner,
self.project_config.repo_name,
item_path,
version_id,
)
)
dependencies.append({"subscriberPackageVersionId": version_id})
return dependencies
def _create_unlocked_package_from_github(self, dependency, dependencies):
gh_for_repo = self.project_config.get_github_api(
dependency["repo_owner"], dependency["repo_name"]
)
zip_src = download_extract_github(
gh_for_repo,
dependency["repo_owner"],
dependency["repo_name"],
dependency["subfolder"],
ref=dependency.get("ref"),
)
package_zip_builder = MetadataPackageZipBuilder.from_zipfile(
zip_src, options=dependency, logger=self.logger
)
package_config = PackageConfig(
package_name="{repo_owner}/{repo_name} {subfolder}".format(**dependency),
version_name="Auto",
package_type="Unlocked",
# Ideally we'd do this without a namespace,
# but it needs to match the dependent package
namespace=self.package_config.namespace,
)
package_id = self._get_or_create_package(package_config)
self.request_id = self._create_version_request(
package_id,
package_config,
package_zip_builder,
dependencies=dependencies,
)
self._poll()
self._reset_poll()
res = self.tooling.query(
"SELECT SubscriberPackageVersionId FROM Package2Version "
f"WHERE Id='{self.package_version_id}'"
)
package2_version = res["records"][0]
return package2_version["SubscriberPackageVersionId"]
def _create_unlocked_package_from_local(self, path, dependencies):
"""Create an unlocked package version from a local directory."""
self.logger.info("Creating package for dependencies in {}".format(path))
package_name = (
f"{self.project_config.repo_owner}/{self.project_config.repo_name} {path}"
)
package_zip_builder = MetadataPackageZipBuilder(
path=path, name=package_name, logger=self.logger
)
package_config = PackageConfig(
package_name=package_name,
version_name="Auto",
package_type="Unlocked",
# Ideally we'd do this without a namespace,
# but it needs to match the dependent package
namespace=self.package_config.namespace,
)
package_id = self._get_or_create_package(package_config)
self.request_id = self._create_version_request(
package_id, package_config, package_zip_builder, dependencies=dependencies
)
self._poll()
self._reset_poll()
res = self.tooling.query(
"SELECT SubscriberPackageVersionId FROM Package2Version "
f"WHERE Id='{self.package_version_id}'"
)
package2_version = res["records"][0]
return package2_version["SubscriberPackageVersionId"]
def _poll_action(self):
"""Check if Package2VersionCreateRequest has completed."""
res = self.tooling.query(
f"SELECT Id, Status, Package2VersionId FROM Package2VersionCreateRequest WHERE Id = '{self.request_id}'"
)
request = res["records"][0]
if request["Status"] == "Success":
self.logger.info("[Success]: Package creation successful")
self.poll_complete = True
self.package_version_id = request["Package2VersionId"]
elif request["Status"] == "Error":
self.logger.error("[Error]: Package creation failed with error:")
res = self.tooling.query(
"SELECT Message FROM Package2VersionCreateRequestError "
f"WHERE ParentRequestId = '{request['Id']}'"
)
errors = []
if res["size"] > 0:
for error in res["records"]:
errors.append(error["Message"])
self.logger.error(error["Message"])
raise PackageUploadFailure("\n".join(errors))
else:
self.logger.info(f"[{request['Status']}]")
| {
"content_hash": "9bfb856fd86f31e9a5d3c1db65ee37b9",
"timestamp": "",
"source": "github",
"line_count": 602,
"max_line_length": 182,
"avg_line_length": 41.84551495016611,
"alnum_prop": 0.5916001746655551,
"repo_name": "SalesforceFoundation/CumulusCI",
"id": "413209e447e230fcba8dc53021933ddf1daa544c",
"size": "25191",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "cumulusci/tasks/package_2gp.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Makefile",
"bytes": "2303"
},
{
"name": "Python",
"bytes": "754354"
},
{
"name": "RobotFramework",
"bytes": "9330"
},
{
"name": "Shell",
"bytes": "5555"
}
],
"symlink_target": ""
} |
from __future__ import absolute_import, division, print_function
import math
import thread
import roslib; roslib.load_manifest('ev3_ros')
import rospy
from sensor_msgs.msg import JointState, Imu
from PyKDL import Rotation
import rpyc
import nxt.error
from nxt_msgs.msg import Range, Contact, JointCommand, Color, Gyro, Accelerometer
class Device(object):
def __init__(self, params):
self.desired_period = 1.0 / params['desired_frequency']
self.period = self.desired_period
self.initialized = False
self.name = params['name']
def needs_trigger(self):
# initialize
if not self.initialized:
self.initialized = True
self.last_run = rospy.Time.now()
rospy.logdebug('Initializing %s', self.name)
return False
# compute frequency
now = rospy.Time.now()
period = 0.9 * self.period + 0.1 * (now - self.last_run).to_sec()
        # check period (test the larger threshold first so the error branch is reachable)
        if period > self.desired_period * 1.5:
            rospy.logerr('%s not reaching desired frequency: actual %f, desired %f',
                         self.name, 1.0 / period, 1.0 / self.desired_period)
        elif period > self.desired_period * 1.2:
            rospy.logwarn('%s not reaching desired frequency: actual %f, desired %f',
                          self.name, 1.0 / period, 1.0 / self.desired_period)
return period > self.desired_period
def trigger(self):
raise NotImplementedError()
def do_trigger(self):
try:
rospy.logdebug('Trigger %s with current frequency %f',
self.name, 1.0/self.period)
now = rospy.Time.now()
self.period = 0.9 * self.period + 0.1 * (now - self.last_run).to_sec()
self.last_run = now
self.trigger()
rospy.logdebug('Trigger %s took %f mili-seconds',
self.name, (rospy.Time.now() - now).to_sec() * 1000)
except nxt.error.I2CError:
rospy.logwarn('caught an exception nxt.error.I2CError')
except nxt.error.DirProtError:
rospy.logwarn('caught an exception nxt.error.DirProtError')
class Motor(Device):
POWER_TO_NM = 50
POWER_MAX = 50
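    # cmd_cb() below scales the commanded joint effort by POWER_TO_NM to get a
    # motor power value and clamps it to +/-POWER_MAX before trigger() applies it.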
def __init__(self, params, lego):
super(Motor, self).__init__(params)
# create motor
self.motor = lego.Motor(params['port'])
self.cmd = 0 # default command
# create publisher
self.pub = rospy.Publisher('joint_state', JointState, queue_size=5)
# create subscriber
self.sub = rospy.Subscriber('joint_command', JointCommand, self.cmd_cb,
None, queue_size=2)
def cmd_cb(self, msg):
if msg.name == self.name:
cmd = msg.effort * self.POWER_TO_NM
if cmd > self.POWER_MAX:
cmd = self.POWER_MAX
elif cmd < -self.POWER_MAX:
cmd = -self.POWER_MAX
self.cmd = cmd # save command
def trigger(self):
js = JointState()
js.header.stamp = rospy.Time.now()
js.name.append(self.name)
js.position.append(math.radians(self.motor.position))
js.velocity.append(math.radians(self.motor.pulses_per_second))
js.effort.append(0)
self.pub.publish(js)
# send command
self.motor.run_forever(int(self.cmd), regulation_mode=False)
class UltraSonicSensor(Device):
def __init__(self, params, lego):
super(UltraSonicSensor, self).__init__(params)
# create ultrasonic sensor
self.ultrasonic = lego.UltrasonicSensor(params['port'])
self.frame_id = params['frame_id']
self.spread = params['spread_angle']
self.min_range = params['min_range']
self.max_range = params['max_range']
# create publisher
self.pub = rospy.Publisher(params['name'], Range, queue_size=5)
def trigger(self):
ds = Range()
ds.header.frame_id = self.frame_id
ds.header.stamp = rospy.Time.now()
ds.range = self.ultrasonic.dist_cm / 100.0
ds.spread_angle = self.spread
ds.range_min = self.min_range
ds.range_max = self.max_range
self.pub.publish(ds)
class GyroSensor(Device):
def __init__(self, params, lego):
super(GyroSensor, self).__init__(params)
# create gyro sensor
self.gyro = lego.GyroSensor(params['port'])
self.frame_id = params['frame_id']
self.orientation = 0.0
self.offset = 0.0
self.prev_time = rospy.Time.now()
# calibrate
rospy.loginfo('Calibrating Gyro. Don\'t move the robot now')
start_time = rospy.Time.now()
cal_duration = rospy.Duration(2)
offset = 0
tmp_time = rospy.Time.now()
while rospy.Time.now() < start_time + cal_duration:
rospy.sleep(0.01)
sample = self.gyro.ang
now = rospy.Time.now()
offset += (sample * (now - tmp_time).to_sec())
tmp_time = now
self.offset = offset / (tmp_time - start_time).to_sec()
rospy.loginfo('Gyro calibrated with offset %f', self.offset)
# create publisher
self.pub = rospy.Publisher(params['name'], Gyro, queue_size=5)
# create publisher
self.pub2 = rospy.Publisher(params['name'] + '_imu', Imu, queue_size=5)
def trigger(self):
sample = self.gyro.ang
gs = Gyro()
gs.header.frame_id = self.frame_id
gs.header.stamp = rospy.Time.now()
gs.calibration_offset.x = 0.0
gs.calibration_offset.y = 0.0
gs.calibration_offset.z = self.offset
gs.angular_velocity.x = 0.0
gs.angular_velocity.y = 0.0
gs.angular_velocity.z = (sample - self.offset) * math.pi / 180.0
gs.angular_velocity_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 1]
self.pub.publish(gs)
imu = Imu()
imu.header.frame_id = self.frame_id
imu.header.stamp = rospy.Time.now()
imu.angular_velocity.x = 0.0
imu.angular_velocity.y = 0.0
imu.angular_velocity.z = (sample-self.offset) * math.pi / 180.0
imu.angular_velocity_covariance = [0, 0, 0, 0, 0, 0, 0, 0, 1]
imu.orientation_covariance = [0.001, 0, 0, 0, 0.001, 0, 0, 0, 0.1]
self.orientation += imu.angular_velocity.z * (imu.header.stamp - self.prev_time).to_sec()
self.prev_time = imu.header.stamp
imu.orientation.x, imu.orientation.y, imu.orientation.z, imu.orientation.w = \
Rotation.RotZ(self.orientation).GetQuaternion()
self.pub2.publish(imu)
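# A hypothetical ROS parameter layout expected under ~ev3_robot by main() below;
# names, ports and ranges are illustrative only:
#
#   ev3_robot:
#     - {type: motor, name: wheel_joint, port: PORT_B, desired_frequency: 20.0}
#     - {type: ultrasonic, name: ultrasonic_sensor, port: PORT_1, frame_id: ultrasonic_link,
#        spread_angle: 0.2, min_range: 0.03, max_range: 2.5, desired_frequency: 10.0}
#     - {type: gyro, name: gyro_sensor, port: PORT_2, frame_id: gyro_link, desired_frequency: 20.0}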
def main():
rospy.init_node('ev3_ros')
ns = 'ev3_robot'
host = rospy.get_param('~host', None)
conn = rpyc.classic.connect(host)
lego = conn.modules.ev3.lego
config = rospy.get_param('~' + ns)
components = []
for c in config:
rospy.loginfo('Creating %s with name %s on %s',
c['type'], c['name'], c['port'])
if c['type'] == 'motor':
components.append(Motor(c, lego))
elif c['type'] == 'ultrasonic':
components.append(UltraSonicSensor(c, lego))
elif c['type'] == 'gyro':
components.append(GyroSensor(c, lego))
else:
rospy.logerr('Invalid sensor/actuator type %s', c['type'])
callback_handle_frequency = 10.0
last_callback_handle = rospy.Time.now()
lock = thread.allocate_lock()
while not rospy.is_shutdown():
lock.acquire()
triggered = False
for c in components:
if c.needs_trigger() and not triggered:
c.do_trigger()
triggered = True
lock.release()
now = rospy.Time.now()
if (now - last_callback_handle).to_sec() > 1.0 / callback_handle_frequency:
last_callback_handle = now
rospy.sleep(0.01)
if __name__ == '__main__':
main()
| {
"content_hash": "0518bcfedd0125c55362bc713a80ab3a",
"timestamp": "",
"source": "github",
"line_count": 226,
"max_line_length": 97,
"avg_line_length": 35.23893805309734,
"alnum_prop": 0.5752134605725766,
"repo_name": "youtalk/mindstorms_ros",
"id": "18070129df9436effbdbad28cef858aae03bb04a",
"size": "8010",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ev3/ev3_ros/scripts/ev3_ros.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "16133"
},
{
"name": "CMake",
"bytes": "17131"
},
{
"name": "Python",
"bytes": "113963"
}
],
"symlink_target": ""
} |
"""
This script handles the initial configuration using command-line arguments and the YAML config file, calling the metadatanavigator functions to enable/disable debug, color and JSON output support.
Once all configuration is set, the script calls the mnavigator function, imported from metadatanavigator, with the parameters configured by the user.
"""
from metadatanavigator import *
import json
import yaml
import argparse
def create_parser():
"""
    Create an argument parser and parse the command-line configuration.
"""
parser = argparse.ArgumentParser(description='A tool to navigate through AWS EC2 instance metadata')
parser.add_argument("-d", "--debug", help="Debug Mode", action="store_true")
parser.add_argument("-j", "--json", help="JSON output mode", action="store_true")
parser.add_argument("-c", "--color", help="COLOR output mode", action="store_true")
parser.add_argument("-a", "--all", help="Dump all metadata", action="store_true")
parser.add_argument("-C", "--config", help="Config file path [default mnavigator.yaml]",nargs='?',const="mnavigator.yaml", default="mnavigator.yaml")
parser.add_argument("-p", "--path", help="Metadata PATH for pipe output mode [Disables INTERATIVE mode][DEFAULT ROOT]",nargs='?', const="/")
args=parser.parse_args()
return args
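# A minimal, hypothetical mnavigator.yaml covering the keys read in main() below;
# setconfig() may expect additional keys (e.g. prompt colors) that are not shown:
#
#   debug: false
#   jsonenable: false
#   colorenable: true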
def main():
"""
Main function. Handle configuration settings and metadata navigator's main functions.
"""
try:
args=create_parser()
with open(args.config,'r') as ymlfile:
            cfg = yaml.safe_load(ymlfile)
if args.debug or cfg['debug']==True:
"""
Enable debug.
"""
setdebugstatus(True)
print ("ARGUMENTS: ",args)
print ("LOADED CONFIG: ", cfg)
if args.json or cfg['jsonenable']:
"""
Enable JSON output.
"""
setjsonstatus()
if args.color or cfg['colorenable']:
"""
Enable COLOR output.
"""
setcolorstatus()
if args.path:
"""Enable PIPE mode"""
pipemode=True
pipemodepath=args.path
else:
"""Disable PIPE mode"""
pipemode=False
pipemodepath=None
if args.all:
"""Dump all metadata"""
result=metadatadump()
print(result)
exit(0)
"""
        Set up the rest of the configuration, such as colors for the prompt and texts.
"""
setconfig(cfg)
"""
Call tool's main function.
"""
mnavigator(pipemode, pipemodepath)
except IOError:
print("ERROR: Could not find YAML configuration file",args.config)
except Exception as e:
print("ERROR: Unrecoverable error: ",e)
if __name__ == '__main__':
main()
| {
"content_hash": "d156a4d35a632d44d2442fa7d5fb7717",
"timestamp": "",
"source": "github",
"line_count": 87,
"max_line_length": 196,
"avg_line_length": 33.160919540229884,
"alnum_prop": 0.5996533795493935,
"repo_name": "marcelocrnunes/metadatanavigator",
"id": "ad69a2290009d915b0928af171be8c8400623507",
"size": "2904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mnavigator.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "27513"
}
],
"symlink_target": ""
} |
"""
This module provides relevance matching and formatting of related strings
based on the relevance. It is borrowed from Gnome-Do with modifications
to fit nicely into python.
>>> import relevance
>>> relevance.score('hi there dude', 'hi dude')
0.53480769230769232
>>> relevance.formatCommonSubstrings('hi there dude', 'hi dude')
'<b>hi </b>there <b>dude</b>'
"""
def formatCommonSubstrings(main, other, format = '<b>%s</b>'):
"""
Creates a new string using @format to highlight matching substrings
of @other in @main.
Returns: a formatted str
>>> formatCommonSubstrings('hi there dude', 'hi dude')
'<b>hi </b>there <b>dude</b>'
"""
length = 0
result = ''
match_pos = last_main_cut = 0
lower_main = main.lower()
other = other.lower()
for pos in range(len(other)):
matchedTermination = False
for length in range(1, 1 + len(other) - pos + 1):
tmp_match_pos = _index(lower_main, other[pos:pos + length])
if tmp_match_pos < 0:
length -= 1
matchedTermination = False
break
else:
matchedTermination = True
match_pos = tmp_match_pos
if matchedTermination:
length -= 1
if 0 < length:
# There is a match starting at match_pos with positive length
skipped = main[last_main_cut:match_pos - last_main_cut]
matched = main[match_pos:match_pos + length]
if len(skipped) + len(matched) < len(main):
remainder = formatCommonSubstrings(
main[match_pos + length:],
other[pos + length:],
format)
else:
remainder = ''
result = '%s%s%s' % (skipped, format % matched, remainder)
break
if result == '':
# No matches
result = main
return result
def score(s, query):
"""
A relevancy score for the string ranging from 0 to 1
@s: a str to be scored
@query: a str query to score against
Returns: a float between 0 and 1
>>> score('terminal', 'trml')
0.52875000000000005
>>> score('terminal', 'term')
0.96750000000000003
"""
if len(query) == 0:
return 1
ls = s.lower()
lquery = query.lower()
lastPos = 0
for c in lquery:
lastPos = ls.find(c, lastPos)
if lastPos == -1:
return 0
score = float(0)
# Find the shortest possible substring that matches the query
# and get the ration of their lengths for a base score
match = _findBestMatch(ls, lquery)
if match[1] - match[0] == 0:
return .0
score = len(lquery) / float(match[1] - match[0])
if score == 0:
return .0
# Now we weight by string length so shorter strings are better
score *= .7 + len(lquery) / len(s) * .3
# Bonus points if the characters start words
good = 0
bad = 1
firstCount = 0
for i in range(match[0], match[1] - 1):
if s[i] == ' ':
if ls[i + 1] in lquery:
firstCount += 1
else:
bad += 1
# A first character match counts extra
if lquery[0] == ls[0]:
firstCount += 2
# The longer the acronym, the better it scores
good += firstCount * firstCount * 4
# Better yet if the match itself started there
if match[0] == 0:
good += 2
# Super bonus if the whole match is at the beginning
if match[1] == len(lquery) - 1:
good += match[1] + 4
# Super duper bonus if it is a perfect match
if lquery == ls:
good += match[1] * 2 + 4
if good + bad > 0:
score = (score + 3 * good / (good + bad)) / 4
    # This fix makes sure that perfect matches always rank higher
    # than split matches. Perfect matches get the .9 - 1.0 range;
    # everything else scores lower.
if match[1] - match[0] == len(lquery):
score = .9 + .1 * score
else:
score = .9 * score
return score
def _findBestMatch(s, query):
"""
Finds the shortest substring of @s that contains all characters of query
in order.
@s: a str to search
@query: a str query to search for
    Returns: a two-item tuple containing the start and end indices of
the match. No match returns (-1,-1).
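    For example:
    >>> _findBestMatch('terminal', 'trml')
    (0, 8)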
"""
if len(query) == 0:
return 0, 0
index = -1
bestMatch = -1, -1
# Find the last instance of the last character of the query
# since we never need to search beyond that
lastChar = len(s) - 1
while lastChar >= 0 and s[lastChar] != query[-1]:
lastChar -= 1
# No instance of the character?
if lastChar == -1:
return bestMatch
# Loop through each instance of the first character in query
index = _index(s, query[0], index + 1, lastChar - index)
while index >= 0:
# Is there room for a match?
if index > (lastChar + 1 - len(query)):
break
# Look for the best match in the tail
        # We know the first char matches, so we don't check it.
cur = index + 1
qcur = 1
while (qcur < len(query)) and (cur < len(s)):
if query[qcur] == s[cur]:
qcur += 1
cur += 1
if ((qcur == len(query)) \
and (((cur - index) < (bestMatch[1] - bestMatch[0])) \
or (bestMatch[0] == -1))):
bestMatch = (index, cur)
if index == (len(s) - 1):
break
index = _index(s, query[0], index + 1, lastChar - index)
return bestMatch
def _index(s, char, index = 0, count = -1):
"""
Looks for the index of @char in @s starting at @index for count bytes.
Returns: int containing the offset of @char. -1 if @char is not found.
>>> _index('hi', 'i', 0, 2)
1
"""
if count >= 0:
s = s[index:index + count]
else:
s = s[index:]
try:
return index + s.index(char)
except ValueError:
return -1
| {
"content_hash": "0f2b6c589285103a89ab0d4ef839e041",
"timestamp": "",
"source": "github",
"line_count": 219,
"max_line_length": 76,
"avg_line_length": 28.534246575342465,
"alnum_prop": 0.533685389662346,
"repo_name": "icebreaker/dotfiles",
"id": "e5e139f67513b0f8fe84cff9ceee4e22da2aa917",
"size": "7265",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "gnome/gnome2/gedit/plugins.symlink/gotofile/relevance.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "6086"
},
{
"name": "C++",
"bytes": "2460"
},
{
"name": "CSS",
"bytes": "336717"
},
{
"name": "HTML",
"bytes": "3559"
},
{
"name": "Lua",
"bytes": "1143"
},
{
"name": "Perl",
"bytes": "2966"
},
{
"name": "Python",
"bytes": "884750"
},
{
"name": "Ruby",
"bytes": "50360"
},
{
"name": "Shell",
"bytes": "42835"
},
{
"name": "Vim Script",
"bytes": "3955400"
},
{
"name": "Zig",
"bytes": "1179"
}
],
"symlink_target": ""
} |
import os
import requests
import sys
def main():
# Unknown error raised when the wifi adapter dies - restart RPi to "fix"
url = 'http://www.google.com'
try:
requests.packages.urllib3.disable_warnings()
_ = requests.get(url)
print('Successful ping')
    except Exception:
sys.stderr.write('Failure to connect to Google. Restarting.\n')
os.system('sudo shutdown -r now')
if __name__ == '__main__':
main()
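# Hypothetical crontab entry to run this connectivity check periodically
# (interpreter and script path are illustrative):
#   */5 * * * * /usr/bin/python /home/pi/heartbeat_or_restart.py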
| {
"content_hash": "9acbb254b3c96a013a26aedc74d7bd0a",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 76,
"avg_line_length": 23.842105263157894,
"alnum_prop": 0.6158940397350994,
"repo_name": "noelevans/sandpit",
"id": "6843b4560d57518a4f35a1b5ea65fecfbc6542a9",
"size": "453",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rpi/heartbeat_or_restart.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "7565"
},
{
"name": "HTML",
"bytes": "4003856"
},
{
"name": "Julia",
"bytes": "2285"
},
{
"name": "Jupyter Notebook",
"bytes": "257479"
},
{
"name": "OpenEdge ABL",
"bytes": "1071"
},
{
"name": "Perl",
"bytes": "1003"
},
{
"name": "Python",
"bytes": "383797"
},
{
"name": "R",
"bytes": "16913"
},
{
"name": "Shell",
"bytes": "11957"
},
{
"name": "TypeScript",
"bytes": "112"
},
{
"name": "Vim script",
"bytes": "5639"
}
],
"symlink_target": ""
} |
import sys
import datetime
import glob
import traceback
import re
import shlex
import os
DOCUMENTATION = '''
---
module: command
version_added: historical
short_description: Executes a command on a remote node
description:
- The M(command) module takes the command name followed by a list of space-delimited arguments.
- The given command will be executed on all selected nodes. It will not be
processed through the shell, so variables like C($HOME) and operations
like C("<"), C(">"), C("|"), and C("&") will not work (use the M(shell)
module if you need these features).
options:
free_form:
description:
- the command module takes a free form command to run. There is no parameter actually named 'free form'.
See the examples!
required: true
default: null
aliases: []
creates:
description:
- a filename, when it already exists, this step will B(not) be run.
required: no
default: null
removes:
description:
- a filename, when it does not exist, this step will B(not) be run.
version_added: "0.8"
required: no
default: null
chdir:
description:
- cd into this directory before running the command
version_added: "0.6"
required: false
default: null
executable:
description:
- change the shell used to execute the command. Should be an absolute path to the executable.
required: false
default: null
version_added: "0.9"
warn:
version_added: "1.8"
default: yes
description:
- if command warnings are on in ansible.cfg, do not warn about this particular line if set to no/false.
    required: false
notes:
- If you want to run a command through the shell (say you are using C(<),
C(>), C(|), etc), you actually want the M(shell) module instead. The
M(command) module is much more secure as it's not affected by the user's
environment.
- " C(creates), C(removes), and C(chdir) can be specified after the command. For instance, if you only want to run a command if a certain file does not exist, use this."
author: Michael DeHaan
'''
EXAMPLES = '''
# Example from Ansible Playbooks.
- command: /sbin/shutdown -t now
# Run the command if the specified file does not exist.
- command: /usr/bin/make_database.sh arg1 arg2 creates=/path/to/database
# You can also use the 'args' form to provide the options. This command
# will change the working directory to somedir/ and will only run when
# /path/to/database doesn't exist.
- command: /usr/bin/make_database.sh arg1 arg2
args:
chdir: somedir/
creates: /path/to/database
'''
# This is a pretty complex regex, which functions as follows:
#
# 1. (^|\s)
# ^ look for a space or the beginning of the line
# 2. (creates|removes|chdir|executable|NO_LOG)=
# ^ look for a valid param, followed by an '='
# 3. (?P<quote>[\'"])?
# ^ look for an optional quote character, which can either be
# a single or double quote character, and store it for later
# 4. (.*?)
# ^ match everything in a non-greedy manner until...
# 5. (?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)
# ^ a non-escaped space or a non-escaped quote of the same kind
# that was matched in the first 'quote' is found, or the end of
# the line is reached
PARAM_REGEX = re.compile(r'(^|\s)(creates|removes|chdir|executable|NO_LOG|warn)=(?P<quote>[\'"])?(.*?)(?(quote)(?<!\\)(?P=quote))((?<!\\)(?=\s)|$)')
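# Illustration (hypothetical input): for the free-form line
#   /usr/bin/make_database.sh arg1 creates=/path/to/database chdir=/tmp
# PARAM_REGEX.sub('', ...) strips the creates= and chdir= tokens, leaving
# '/usr/bin/make_database.sh arg1' as the command to run, while those keys are
# parsed separately in CommandModule._load_params below.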
def check_command(commandline):
arguments = { 'chown': 'owner', 'chmod': 'mode', 'chgrp': 'group',
'ln': 'state=link', 'mkdir': 'state=directory',
'rmdir': 'state=absent', 'rm': 'state=absent', 'touch': 'state=touch' }
commands = { 'git': 'git', 'hg': 'hg', 'curl': 'get_url', 'wget': 'get_url',
'svn': 'subversion', 'service': 'service',
'mount': 'mount', 'rpm': 'yum', 'yum': 'yum', 'apt-get': 'apt-get',
'tar': 'unarchive', 'unzip': 'unarchive', 'sed': 'template or lineinfile',
'rsync': 'synchronize' }
warnings = list()
command = os.path.basename(commandline.split()[0])
if command in arguments:
warnings.append("Consider using file module with %s rather than running %s" % (arguments[command], command))
if command in commands:
warnings.append("Consider using %s module rather than running %s" % (commands[command], command))
return warnings
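# For example, check_command('rm -rf /tmp/build') returns
# ["Consider using file module with state=absent rather than running rm"].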
def main():
# the command module is the one ansible module that does not take key=value args
# hence don't copy this one if you are looking to build others!
module = CommandModule(argument_spec=dict())
shell = module.params['shell']
chdir = module.params['chdir']
executable = module.params['executable']
args = module.params['args']
creates = module.params['creates']
removes = module.params['removes']
warn = module.params.get('warn', True)
if args.strip() == '':
module.fail_json(rc=256, msg="no command given")
if chdir:
os.chdir(chdir)
if creates:
# do not run the command if the line contains creates=filename
# and the filename already exists. This allows idempotence
# of command executions.
v = os.path.expanduser(creates)
if glob.glob(v):
module.exit_json(
cmd=args,
stdout="skipped, since %s exists" % v,
changed=False,
stderr=False,
rc=0
)
if removes:
# do not run the command if the line contains removes=filename
# and the filename does not exist. This allows idempotence
# of command executions.
v = os.path.expanduser(removes)
if not glob.glob(v):
module.exit_json(
cmd=args,
stdout="skipped, since %s does not exist" % v,
changed=False,
stderr=False,
rc=0
)
warnings = list()
if warn:
warnings = check_command(args)
if not shell:
args = shlex.split(args)
startd = datetime.datetime.now()
rc, out, err = module.run_command(args, executable=executable, use_unsafe_shell=shell)
endd = datetime.datetime.now()
delta = endd - startd
if out is None:
out = ''
if err is None:
err = ''
module.exit_json(
cmd = args,
stdout = out.rstrip("\r\n"),
stderr = err.rstrip("\r\n"),
rc = rc,
start = str(startd),
end = str(endd),
delta = str(delta),
changed = True,
warnings = warnings
)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.splitter import *
# only the command module should ever need to do this
# everything else should be simple key=value
class CommandModule(AnsibleModule):
def _handle_aliases(self):
return {}
def _check_invalid_arguments(self):
pass
def _load_params(self):
''' read the input and return a dictionary and the arguments string '''
args = MODULE_ARGS
params = {}
params['chdir'] = None
params['creates'] = None
params['removes'] = None
params['shell'] = False
params['executable'] = None
params['warn'] = True
if "#USE_SHELL" in args:
args = args.replace("#USE_SHELL", "")
params['shell'] = True
items = split_args(args)
for x in items:
quoted = x.startswith('"') and x.endswith('"') or x.startswith("'") and x.endswith("'")
if '=' in x and not quoted:
# check to see if this is a special parameter for the command
k, v = x.split('=', 1)
v = unquote(v.strip())
if k in ('creates', 'removes', 'chdir', 'executable', 'NO_LOG'):
if k == "chdir":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v) and os.path.isdir(v)):
self.fail_json(rc=258, msg="cannot change to directory '%s': path does not exist" % v)
elif k == "executable":
v = os.path.abspath(os.path.expanduser(v))
if not (os.path.exists(v)):
self.fail_json(rc=258, msg="cannot use executable '%s': file does not exist" % v)
params[k] = v
# Remove any of the above k=v params from the args string
args = PARAM_REGEX.sub('', args)
params['args'] = args.strip()
return (params, params['args'])
main()
| {
"content_hash": "c67bd61d66a495abd1398cec44a3c497",
"timestamp": "",
"source": "github",
"line_count": 251,
"max_line_length": 174,
"avg_line_length": 34.832669322709165,
"alnum_prop": 0.5919020931030539,
"repo_name": "dispansible/dispansible",
"id": "bc286d6855debdcd718ef173d73ba488311b03ba",
"size": "9507",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ansible/library/disp_command.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80772"
},
{
"name": "Shell",
"bytes": "3683"
}
],
"symlink_target": ""
} |
from __future__ import print_function
import pytest
from datetime import datetime, timedelta
import itertools
from numpy import nan
import numpy as np
from pandas import (DataFrame, Series, Timestamp, date_range, compat,
option_context, Categorical)
from pandas.core.arrays import IntervalArray, integer_array
from pandas.compat import StringIO
import pandas as pd
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
# Segregated collection of methods that require the BlockManager internal data
# structure
class TestDataFrameBlockInternals(TestData):
def test_cast_internals(self):
casted = DataFrame(self.frame._data, dtype=int)
expected = DataFrame(self.frame._series, dtype=int)
assert_frame_equal(casted, expected)
casted = DataFrame(self.frame._data, dtype=np.int32)
expected = DataFrame(self.frame._series, dtype=np.int32)
assert_frame_equal(casted, expected)
def test_consolidate(self):
self.frame['E'] = 7.
consolidated = self.frame._consolidate()
assert len(consolidated._data.blocks) == 1
# Ensure copy, do I want this?
recons = consolidated._consolidate()
assert recons is not consolidated
tm.assert_frame_equal(recons, consolidated)
self.frame['F'] = 8.
assert len(self.frame._data.blocks) == 3
self.frame._consolidate(inplace=True)
assert len(self.frame._data.blocks) == 1
def test_consolidate_deprecation(self):
self.frame['E'] = 7
with tm.assert_produces_warning(FutureWarning):
self.frame.consolidate()
def test_consolidate_inplace(self):
frame = self.frame.copy() # noqa
# triggers in-place consolidation
for letter in range(ord('A'), ord('Z')):
self.frame[chr(letter)] = chr(letter)
def test_values_consolidate(self):
self.frame['E'] = 7.
assert not self.frame._data.is_consolidated()
_ = self.frame.values # noqa
assert self.frame._data.is_consolidated()
def test_modify_values(self):
self.frame.values[5] = 5
assert (self.frame.values[5] == 5).all()
# unconsolidated
self.frame['E'] = 7.
self.frame.values[6] = 6
assert (self.frame.values[6] == 6).all()
def test_boolean_set_uncons(self):
self.frame['E'] = 7.
expected = self.frame.values.copy()
expected[expected > 1] = 2
self.frame[self.frame > 1] = 2
assert_almost_equal(expected, self.frame.values)
def test_values_numeric_cols(self):
self.frame['foo'] = 'bar'
values = self.frame[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
def test_values_lcd(self):
# mixed lcd
values = self.mixed_float[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = self.mixed_float[['A', 'B', 'C']].values
assert values.dtype == np.float32
values = self.mixed_float[['C']].values
assert values.dtype == np.float16
# GH 10364
# B uint64 forces float because there are other signed int types
values = self.mixed_int[['A', 'B', 'C', 'D']].values
assert values.dtype == np.float64
values = self.mixed_int[['A', 'D']].values
assert values.dtype == np.int64
# B uint64 forces float because there are other signed int types
values = self.mixed_int[['A', 'B', 'C']].values
assert values.dtype == np.float64
# as B and C are both unsigned, no forcing to float is needed
values = self.mixed_int[['B', 'C']].values
assert values.dtype == np.uint64
values = self.mixed_int[['A', 'C']].values
assert values.dtype == np.int32
values = self.mixed_int[['C', 'D']].values
assert values.dtype == np.int64
values = self.mixed_int[['A']].values
assert values.dtype == np.int32
values = self.mixed_int[['C']].values
assert values.dtype == np.uint8
def test_constructor_with_convert(self):
# this is actually mostly a test of lib.maybe_convert_objects
# #2845
df = DataFrame({'A': [2 ** 63 - 1]})
result = df['A']
expected = Series(np.asarray([2 ** 63 - 1], np.int64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2 ** 63]})
result = df['A']
expected = Series(np.asarray([2 ** 63], np.uint64), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [datetime(2005, 1, 1), True]})
result = df['A']
expected = Series(np.asarray([datetime(2005, 1, 1), True], np.object_),
name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [None, 1]})
result = df['A']
expected = Series(np.asarray([np.nan, 1], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, 2]})
result = df['A']
expected = Series(np.asarray([1.0, 2], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, 3.0]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, 3.0], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, True]})
result = df['A']
expected = Series(np.asarray([1.0 + 2.0j, True], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0, None]})
result = df['A']
expected = Series(np.asarray([1.0, np.nan], np.float_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [1.0 + 2.0j, None]})
result = df['A']
expected = Series(np.asarray(
[1.0 + 2.0j, np.nan], np.complex_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, True, None]})
result = df['A']
expected = Series(np.asarray(
[2.0, 1, True, None], np.object_), name='A')
assert_series_equal(result, expected)
df = DataFrame({'A': [2.0, 1, datetime(2006, 1, 1), None]})
result = df['A']
expected = Series(np.asarray([2.0, 1, datetime(2006, 1, 1),
None], np.object_), name='A')
assert_series_equal(result, expected)
def test_construction_with_mixed(self):
# test construction edge cases with mixed types
# f7u12, this does not work without extensive workaround
data = [[datetime(2001, 1, 5), nan, datetime(2001, 1, 2)],
[datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 1)]]
df = DataFrame(data)
# check dtypes
result = df.get_dtype_counts().sort_values()
expected = Series({'datetime64[ns]': 3})
# mixed-type frames
self.mixed_frame['datetime'] = datetime.now()
self.mixed_frame['timedelta'] = timedelta(days=1, seconds=1)
assert self.mixed_frame['datetime'].dtype == 'M8[ns]'
assert self.mixed_frame['timedelta'].dtype == 'm8[ns]'
result = self.mixed_frame.get_dtype_counts().sort_values()
expected = Series({'float64': 4,
'object': 1,
'datetime64[ns]': 1,
'timedelta64[ns]': 1}).sort_values()
assert_series_equal(result, expected)
def test_construction_with_conversions(self):
# convert from a numpy array of non-ns timedelta64
arr = np.array([1, 2, 3], dtype='timedelta64[s]')
df = DataFrame(index=range(3))
df['A'] = arr
expected = DataFrame({'A': pd.timedelta_range('00:00:01', periods=3,
freq='s')},
index=range(3))
assert_frame_equal(df, expected)
expected = DataFrame({
'dt1': Timestamp('20130101'),
'dt2': date_range('20130101', periods=3),
# 'dt3' : date_range('20130101 00:00:01',periods=3,freq='s'),
}, index=range(3))
df = DataFrame(index=range(3))
df['dt1'] = np.datetime64('2013-01-01')
df['dt2'] = np.array(['2013-01-01', '2013-01-02', '2013-01-03'],
dtype='datetime64[D]')
# df['dt3'] = np.array(['2013-01-01 00:00:01','2013-01-01
# 00:00:02','2013-01-01 00:00:03'],dtype='datetime64[s]')
assert_frame_equal(df, expected)
def test_constructor_compound_dtypes(self):
# GH 5191
# compound dtypes should raise not-implementederror
def f(dtype):
data = list(itertools.repeat((datetime(2001, 1, 1),
"aa", 20), 9))
return DataFrame(data=data,
columns=["A", "B", "C"],
dtype=dtype)
pytest.raises(NotImplementedError, f,
[("A", "datetime64[h]"),
("B", "str"),
("C", "int32")])
# these work (though results may be unexpected)
f('int64')
f('float64')
# 10822
# invalid error message on dt inference
if not compat.is_platform_windows():
f('M8[ns]')
def test_equals_different_blocks(self):
# GH 9330
df0 = pd.DataFrame({"A": ["x", "y"], "B": [1, 2],
"C": ["w", "z"]})
df1 = df0.reset_index()[["A", "B", "C"]]
# this assert verifies that the above operations have
# induced a block rearrangement
assert (df0._data.blocks[0].dtype != df1._data.blocks[0].dtype)
# do the real tests
assert_frame_equal(df0, df1)
assert df0.equals(df1)
assert df1.equals(df0)
def test_copy_blocks(self):
# API/ENH 9607
df = DataFrame(self.frame, copy=True)
column = df.columns[0]
# use the default copy=True, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks()
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did not change the original DataFrame
assert not _df[column].equals(df[column])
def test_no_copy_blocks(self):
# API/ENH 9607
df = DataFrame(self.frame, copy=True)
column = df.columns[0]
# use the copy=False, change a column
# deprecated 0.21.0
with tm.assert_produces_warning(FutureWarning,
check_stacklevel=False):
blocks = df.as_blocks(copy=False)
for dtype, _df in blocks.items():
if column in _df:
_df.loc[:, column] = _df[column] + 1
# make sure we did change the original DataFrame
assert _df[column].equals(df[column])
def test_copy(self):
cop = self.frame.copy()
cop['E'] = cop['A']
assert 'E' not in self.frame
# copy objects
copy = self.mixed_frame.copy()
assert copy._data is not self.mixed_frame._data
def test_pickle(self):
unpickled = tm.round_trip_pickle(self.mixed_frame)
assert_frame_equal(self.mixed_frame, unpickled)
# buglet
self.mixed_frame._data.ndim
# empty
unpickled = tm.round_trip_pickle(self.empty)
repr(unpickled)
# tz frame
unpickled = tm.round_trip_pickle(self.tzframe)
assert_frame_equal(self.tzframe, unpickled)
def test_consolidate_datetime64(self):
# numpy vstack bug
data = """\
starting,ending,measure
2012-06-21 00:00,2012-06-23 07:00,77
2012-06-23 07:00,2012-06-23 16:30,65
2012-06-23 16:30,2012-06-25 08:00,77
2012-06-25 08:00,2012-06-26 12:00,0
2012-06-26 12:00,2012-06-27 08:00,77
"""
df = pd.read_csv(StringIO(data), parse_dates=[0, 1])
ser_starting = df.starting
ser_starting.index = ser_starting.values
ser_starting = ser_starting.tz_localize('US/Eastern')
ser_starting = ser_starting.tz_convert('UTC')
ser_starting.index.name = 'starting'
ser_ending = df.ending
ser_ending.index = ser_ending.values
ser_ending = ser_ending.tz_localize('US/Eastern')
ser_ending = ser_ending.tz_convert('UTC')
ser_ending.index.name = 'ending'
df.starting = ser_starting.index
df.ending = ser_ending.index
tm.assert_index_equal(pd.DatetimeIndex(
df.starting), ser_starting.index)
tm.assert_index_equal(pd.DatetimeIndex(df.ending), ser_ending.index)
def test_is_mixed_type(self):
assert not self.frame._is_mixed_type
assert self.mixed_frame._is_mixed_type
def test_get_numeric_data(self):
# TODO(wesm): unused?
intname = np.dtype(np.int_).name # noqa
floatname = np.dtype(np.float_).name # noqa
datetime64name = np.dtype('M8[ns]').name
objectname = np.dtype(np.object_).name
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'f': Timestamp('20010102')},
index=np.arange(10))
result = df.get_dtype_counts()
expected = Series({'int64': 1, 'float64': 1,
datetime64name: 1, objectname: 1})
result = result.sort_index()
expected = expected.sort_index()
assert_series_equal(result, expected)
df = DataFrame({'a': 1., 'b': 2, 'c': 'foo',
'd': np.array([1.] * 10, dtype='float32'),
'e': np.array([1] * 10, dtype='int32'),
'f': np.array([1] * 10, dtype='int16'),
'g': Timestamp('20010102')},
index=np.arange(10))
result = df._get_numeric_data()
expected = df.loc[:, ['a', 'b', 'd', 'e', 'f']]
assert_frame_equal(result, expected)
only_obj = df.loc[:, ['c', 'g']]
result = only_obj._get_numeric_data()
expected = df.loc[:, []]
assert_frame_equal(result, expected)
df = DataFrame.from_dict(
{'a': [1, 2], 'b': ['foo', 'bar'], 'c': [np.pi, np.e]})
result = df._get_numeric_data()
expected = DataFrame.from_dict({'a': [1, 2], 'c': [np.pi, np.e]})
assert_frame_equal(result, expected)
df = result.copy()
result = df._get_numeric_data()
expected = df
assert_frame_equal(result, expected)
def test_get_numeric_data_extension_dtype(self):
# GH 22290
df = DataFrame({
'A': integer_array([-10, np.nan, 0, 10, 20, 30], dtype='Int64'),
'B': Categorical(list('abcabc')),
'C': integer_array([0, 1, 2, 3, np.nan, 5], dtype='UInt8'),
'D': IntervalArray.from_breaks(range(7))})
result = df._get_numeric_data()
expected = df.loc[:, ['A', 'C']]
assert_frame_equal(result, expected)
def test_convert_objects(self):
oops = self.mixed_frame.T.T
converted = oops._convert(datetime=True)
assert_frame_equal(converted, self.mixed_frame)
assert converted['A'].dtype == np.float64
# force numeric conversion
self.mixed_frame['H'] = '1.'
self.mixed_frame['I'] = '1'
# add in some items that will be nan
length = len(self.mixed_frame)
self.mixed_frame['J'] = '1.'
self.mixed_frame['K'] = '1'
self.mixed_frame.loc[0:5, ['J', 'K']] = 'garbled'
converted = self.mixed_frame._convert(datetime=True, numeric=True)
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
assert converted['J'].dtype == 'float64'
assert converted['K'].dtype == 'float64'
assert len(converted['J'].dropna()) == length - 5
assert len(converted['K'].dropna()) == length - 5
# via astype
converted = self.mixed_frame.copy()
converted['H'] = converted['H'].astype('float64')
converted['I'] = converted['I'].astype('int64')
assert converted['H'].dtype == 'float64'
assert converted['I'].dtype == 'int64'
# via astype, but errors
converted = self.mixed_frame.copy()
with tm.assert_raises_regex(ValueError, 'invalid literal'):
converted['H'].astype('int32')
# mixed in a single column
df = DataFrame(dict(s=Series([1, 'na', 3, 4])))
result = df._convert(datetime=True, numeric=True)
expected = DataFrame(dict(s=Series([1, np.nan, 3, 4])))
assert_frame_equal(result, expected)
def test_convert_objects_no_conversion(self):
mixed1 = DataFrame(
{'a': [1, 2, 3], 'b': [4.0, 5, 6], 'c': ['x', 'y', 'z']})
mixed2 = mixed1._convert(datetime=True)
assert_frame_equal(mixed1, mixed2)
def test_infer_objects(self):
# GH 11221
df = DataFrame({'a': ['a', 1, 2, 3],
'b': ['b', 2.0, 3.0, 4.1],
'c': ['c', datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [1, 2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
df = df.iloc[1:].infer_objects()
assert df['a'].dtype == 'int64'
assert df['b'].dtype == 'float64'
assert df['c'].dtype == 'M8[ns]'
assert df['d'].dtype == 'object'
expected = DataFrame({'a': [1, 2, 3],
'b': [2.0, 3.0, 4.1],
'c': [datetime(2016, 1, 1),
datetime(2016, 1, 2),
datetime(2016, 1, 3)],
'd': [2, 3, 'd']},
columns=['a', 'b', 'c', 'd'])
# reconstruct frame to verify inference is same
tm.assert_frame_equal(df.reset_index(drop=True), expected)
def test_stale_cached_series_bug_473(self):
# this is chained, but ok
with option_context('chained_assignment', None):
Y = DataFrame(np.random.random((4, 4)), index=('a', 'b', 'c', 'd'),
columns=('e', 'f', 'g', 'h'))
repr(Y)
Y['e'] = Y['e'].astype('object')
Y['g']['c'] = np.NaN
repr(Y)
result = Y.sum() # noqa
exp = Y['g'].sum() # noqa
assert pd.isna(Y['g']['c'])
def test_get_X_columns(self):
# numeric and object columns
df = DataFrame({'a': [1, 2, 3],
'b': [True, False, True],
'c': ['foo', 'bar', 'baz'],
'd': [None, None, None],
'e': [3.14, 0.577, 2.773]})
tm.assert_index_equal(df._get_numeric_data().columns,
pd.Index(['a', 'b', 'e']))
def test_strange_column_corruption_issue(self):
# (wesm) Unclear how exactly this is related to internal matters
df = DataFrame(index=[0, 1])
df[0] = nan
wasCol = {}
# uncommenting these makes the results match
# for col in xrange(100, 200):
# wasCol[col] = 1
# df[col] = nan
for i, dt in enumerate(df.index):
for col in range(100, 200):
if col not in wasCol:
wasCol[col] = 1
df[col] = nan
df[col][dt] = i
myid = 100
first = len(df.loc[pd.isna(df[myid]), [myid]])
second = len(df.loc[pd.isna(df[myid]), [myid]])
assert first == second == 0
| {
"content_hash": "5796a98da549b4764ac18476c9b3b105",
"timestamp": "",
"source": "github",
"line_count": 570,
"max_line_length": 79,
"avg_line_length": 35.76842105263158,
"alnum_prop": 0.5288895428683539,
"repo_name": "cython-testbed/pandas",
"id": "3fe1c84174acba991308aa4247f8018429dfe033",
"size": "20413",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "pandas/tests/frame/test_block_internals.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "4907"
},
{
"name": "C",
"bytes": "404689"
},
{
"name": "C++",
"bytes": "17194"
},
{
"name": "HTML",
"bytes": "551714"
},
{
"name": "Makefile",
"bytes": "574"
},
{
"name": "Python",
"bytes": "14136208"
},
{
"name": "Shell",
"bytes": "27731"
},
{
"name": "Smarty",
"bytes": "2045"
}
],
"symlink_target": ""
} |
'''
Test suite for pcalg
'''
import networkx as nx
import numpy as np
from gsq.ci_tests import ci_test_bin, ci_test_dis
from gsq.gsq_testdata import bin_data, dis_data
import pytest
from pcalg import estimate_cpdag
from pcalg import estimate_skeleton
@pytest.mark.parametrize(('indep_test_func', 'data_matrix', 'g_answer'), [
(ci_test_bin, np.array(bin_data).reshape((5000, 5)), nx.DiGraph({
0: (1, ),
1: (),
2: (3, 4),
3: (1, 2),
4: (1, 2),
})),
(ci_test_dis, np.array(dis_data).reshape((10000, 5)), nx.DiGraph({
0: (2, ),
1: (2, 3),
2: (),
3: (),
4: (3, ),
})),
])
def test_estimate_cpdag(indep_test_func, data_matrix, g_answer, alpha=0.01):
'''
estimate_cpdag should reveal the answer
'''
(graph, sep_set) = estimate_skeleton(indep_test_func=indep_test_func,
data_matrix=data_matrix,
alpha=alpha)
graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
error_msg = 'True edges should be: %s' % (g_answer.edges(), )
assert nx.is_isomorphic(graph, g_answer), error_msg
def test_fixed_edges():
'''
The fixed edges shall appear in the skeleton
'''
data_matrix = np.array(bin_data).reshape((5000, 5))
(graph, sep_set) = estimate_skeleton(indep_test_func=ci_test_bin,
data_matrix=data_matrix,
alpha=0.01)
graph = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
assert not graph.has_edge(1, 2)
fixed_edges = nx.DiGraph()
fixed_edges.add_nodes_from(range(5))
fixed_edges.add_edge(1, 2)
with pytest.raises(ValueError):
_ = estimate_skeleton(indep_test_func=ci_test_bin,
data_matrix=data_matrix,
alpha=0.01,
fixed_edges=((1,2), ))
with pytest.raises(ValueError):
_ = estimate_skeleton(indep_test_func=ci_test_bin,
data_matrix=data_matrix,
alpha=0.01,
fixed_edges=nx.DiGraph({0: (1, )}))
(graph, _) = estimate_skeleton(indep_test_func=ci_test_bin,
data_matrix=data_matrix,
alpha=0.01,
fixed_edges=fixed_edges)
assert graph.has_edge(1, 2), graph.edges
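# Hedged usage sketch (not collected by pytest): the same two-step
# skeleton -> CPDAG pipeline exercised above, runnable on the bundled binary
# dataset outside the test harness.
def example_workflow(alpha=0.01):
    data_matrix = np.array(bin_data).reshape((5000, 5))
    # Step 1: estimate the undirected skeleton and the separation sets.
    graph, sep_set = estimate_skeleton(indep_test_func=ci_test_bin,
                                       data_matrix=data_matrix,
                                       alpha=alpha)
    # Step 2: orient edges to obtain the CPDAG.
    cpdag = estimate_cpdag(skel_graph=graph, sep_set=sep_set)
    return sorted(cpdag.edges())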
| {
"content_hash": "0c5a2a7cc39f197a5326a4fd32a973fe",
"timestamp": "",
"source": "github",
"line_count": 70,
"max_line_length": 76,
"avg_line_length": 35.68571428571428,
"alnum_prop": 0.5176140912730184,
"repo_name": "keiichishima/pcalg",
"id": "55c253c4bbaa682d4f86fb15b23b9396b7464ad0",
"size": "2522",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "test_pcalg.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "15238"
}
],
"symlink_target": ""
} |
import sys
import subprocess
__all__ = ['log_info', 'handle_error', 'run_command', ]
def log_info(msg):
sys.stdout.write('* {}\n'.format(msg))
sys.stdout.flush()
def handle_error(msg):
sys.stderr.write('* {}\n'.format(msg))
sys.exit(1)
def run_command(
command, ignore_error=False, return_stdout=False, capture_stdout=True):
if not isinstance(command, (list, tuple)):
command = [command, ]
command_str = ' '.join(command)
log_info('Running command {}'.format(command_str))
try:
if capture_stdout:
stdout = subprocess.check_output(command)
else:
subprocess.check_call(command)
stdout = None
except subprocess.CalledProcessError as err:
if not ignore_error:
handle_error('Command failed: {}'.format(err))
else:
        return stdout.decode() if (return_stdout and stdout is not None) else None
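# Hedged usage sketch: how the helpers above might be exercised from a deploy
# script. The commands shown ('uname', 'echo') are illustrative and assume a
# POSIX environment.
if __name__ == '__main__':
    # Capture stdout and reuse it.
    release = run_command(['uname', '-r'], return_stdout=True)
    log_info('Kernel release: {}'.format(release.strip()))
    # Let the command write straight to the terminal instead of capturing.
    run_command(['echo', 'deployment step complete'], capture_stdout=False)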
| {
"content_hash": "78576725a8c6e78b2d634ed04ecaadca",
"timestamp": "",
"source": "github",
"line_count": 34,
"max_line_length": 79,
"avg_line_length": 26.558823529411764,
"alnum_prop": 0.6124031007751938,
"repo_name": "aknuds1/deploy-ecs",
"id": "307c1d4dd59a1b0f98adc5addc7806ab442da48b",
"size": "903",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "_common.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "5181"
}
],
"symlink_target": ""
} |
import json
import signal
import sys
from tornado.httpserver import HTTPServer
from tornado.httputil import HTTPHeaders
import tornado.ioloop
from tornado.options import define, options
from tornado.web import URLSpec
from orders import Book, Buy, Sell
import constants
try:
from httplib import responses # py2
except ImportError:
from http.client import responses # py3
define("port", default=3000, help="run on the given port", type=int)
class BookHandler(tornado.web.RequestHandler):
"""
Handle GET requests to /book API endpoint.
"""
def get(self):
ret = Book().orders()
self.write(ret)
class OrderHandler(tornado.web.RequestHandler):
"""
Handle POST requests to /buy and /sell API endpoints.
"""
def post(self, **kwargs):
order = None
resp = None
body = json.loads(self.request.body)
if self.request.uri == "{}".format(constants.URL_PATH_BUY):
order = Buy(**body)
if self.request.uri == "{}".format(constants.URL_PATH_SELL):
order = Sell(**body)
if not order.is_valid():
resp = {"message": responses[constants.HTTP_400_BAD_REQUEST]}
self.set_status(constants.HTTP_400_BAD_REQUEST)
self.write(resp)
return
try:
resp = Book().match(order)
http_status_code = constants.HTTP_201_CREATED
except Exception as e:
resp = {"message": e.message}
http_status_code = constants.HTTP_500_INTERNAL_SERVER_ERROR
self.set_header("location", "{}://{}{}".format(self.request.protocol,
self.request.host, self.reverse_url("{}".format(constants.URL_NAME_BOOK))))
self.set_status(http_status_code)
self.write(resp)
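# Hedged usage note: a hypothetical client call against OrderHandler once the
# server is running on the default port (3000). The payload fields are
# assumptions -- the accepted fields are whatever orders.Buy / orders.Sell
# take as keyword arguments:
#
#   curl -X POST http://localhost:3000/buy \
#        -H "Content-Type: application/json" \
#        -d '{"price": 100.0, "quantity": 5}'
#
# A valid order is answered with HTTP 201 and a Location header pointing at
# the /book endpoint; an invalid one gets HTTP 400.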
def stop():
"""
Stop the IOLoop.
"""
tornado.ioloop.IOLoop.current().stop()
def sigint_handler(signum, frame):
"""
Add shutdown task in next IOLoop.
"""
tornado.ioloop.IOLoop.current().add_callback(stop)
def main():
tornado.options.parse_command_line()
application = tornado.web.Application([
URLSpec(
r"{}".format(constants.URL_PATH_BOOK),
BookHandler,
name="{}".format(constants.URL_NAME_BOOK)
),
URLSpec(
r"{}".format(constants.URL_PATH_BUY),
OrderHandler,
name="{}".format(constants.URL_NAME_BUY)
),
URLSpec(
r"{}".format(constants.URL_PATH_SELL),
OrderHandler,
name="{}".format(constants.URL_NAME_SELL)
),
])
http_server = HTTPServer(application)
http_server.listen(options.port)
signal.signal(signal.SIGINT, sigint_handler)
tornado.ioloop.IOLoop.current().start()
sys.exit(0)
if __name__ == "__main__":
main()
| {
"content_hash": "baf7f2debd571ca5f56555d7b4eefd34",
"timestamp": "",
"source": "github",
"line_count": 99,
"max_line_length": 87,
"avg_line_length": 28.616161616161616,
"alnum_prop": 0.6050123543946346,
"repo_name": "eigenholser/ddme",
"id": "f63b8dfefd79a8e83d5c69d234d503e29f19dd37",
"size": "2833",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "api.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "14027"
},
{
"name": "Shell",
"bytes": "1809"
}
],
"symlink_target": ""
} |
from __future__ import unicode_literals
from django.db import migrations
import django.utils.timezone
import model_utils.fields
class Migration(migrations.Migration):
dependencies = [
('plugins', '0003_auto_20170501_2124'),
]
operations = [
migrations.AddField(
model_name='downloadrelease',
name='created',
field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
),
migrations.AddField(
model_name='downloadrelease',
name='modified',
field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
),
migrations.AddField(
model_name='plugin',
name='created',
field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
),
migrations.AddField(
model_name='plugin',
name='modified',
field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
),
migrations.AddField(
model_name='release',
name='created',
field=model_utils.fields.AutoCreatedField(default=django.utils.timezone.now, editable=False, verbose_name='created'),
),
migrations.AddField(
model_name='release',
name='modified',
field=model_utils.fields.AutoLastModifiedField(default=django.utils.timezone.now, editable=False, verbose_name='modified'),
),
]
| {
"content_hash": "a1146a8a8c4a99bc78d10d5983e4cffd",
"timestamp": "",
"source": "github",
"line_count": 45,
"max_line_length": 135,
"avg_line_length": 37.75555555555555,
"alnum_prop": 0.6327251324308417,
"repo_name": "ava-project/ava-website",
"id": "a47342d962e622cd4128baf21b47aa971f4a631c",
"size": "1770",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "website/apps/plugins/migrations/0004_auto_20170508_2027.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "176820"
},
{
"name": "HTML",
"bytes": "19026"
},
{
"name": "Makefile",
"bytes": "668"
},
{
"name": "Python",
"bytes": "87142"
},
{
"name": "Shell",
"bytes": "612"
}
],
"symlink_target": ""
} |
import re
from setuptools import find_packages, setup
def read_requirements(req_file):
return [line for line in re.sub(r"\s*#.*\n", "\n", req_file.read()).splitlines() if line]
with open("requirements/base.txt") as f:
REQUIREMENTS = read_requirements(f)
with open("requirements/test.txt") as f:
TEST_REQUIREMENTS = read_requirements(f)
with open("README.md") as f:
LONG_DESCRIPTION = f.read()
setup(
name="naovoce",
version="2.0.0a",
description="Na-ovoce.cz site backend",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/jsmesami/naovoce",
author="Ondřej Nejedlý",
author_email="jsmesami@gmail.com",
license="MIT License",
packages=find_packages(),
entry_points={"console_scripts": ["naovoce=naovoce.manage:main"]},
python_requires=">=3.9",
install_requires=REQUIREMENTS,
tests_require=TEST_REQUIREMENTS,
include_package_data=True,
zip_safe=False,
classifiers=[
"Environment :: Web Environment",
"Framework :: Django",
"Development Status :: 3 - Alpha",
"Operating System :: OS Independent",
"Topic :: Internet :: WWW/HTTP",
"Topic :: Internet :: WWW/HTTP :: Dynamic Content"
"License :: OSI Approved :: MIT License"
"Programming Language :: Python :: 3.9",
],
)
| {
"content_hash": "674e257c88914cfb969ea4e4c5eb3bb1",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 93,
"avg_line_length": 30.043478260869566,
"alnum_prop": 0.64616497829233,
"repo_name": "jsmesami/naovoce",
"id": "dca098f3e893a45cb92e908fb418a46c08b1ce50",
"size": "1406",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "API Blueprint",
"bytes": "18118"
},
{
"name": "Makefile",
"bytes": "658"
},
{
"name": "Python",
"bytes": "170165"
}
],
"symlink_target": ""
} |
import logging
import os
import socket
import subprocess
import sys
import tempfile
import time
from django.core.management.base import BaseCommand
import redisutils
import redis as redislib
log = logging.getLogger('z.redis')
# We process the keys in chunks of size CHUNK.
CHUNK = 3000
# Remove any sets with less than MIN or more than MAX elements.
MIN = 10
MAX = 50
# Expire keys after EXPIRE seconds.
EXPIRE = 60 * 5
# Calling redis can raise raise these errors.
RedisError = redislib.RedisError, socket.error
def vacuum(master, slave):
def keys():
ks = slave.keys()
log.info('There are %s keys to clean up.' % len(ks))
ks = iter(ks)
while 1:
buffer = []
for _ in xrange(CHUNK):
try:
buffer.append(ks.next())
except StopIteration:
yield buffer
return
yield buffer
tmp = tempfile.NamedTemporaryFile(delete=False)
for ks in keys():
tmp.write('\n'.join(ks))
tmp.close()
# It's hard to get Python to clean up the memory from slave.keys(), so
# we'll let the OS do it. argv[0] is a dummy argument, the rest get passed
# like a normal command line.
os.execl(sys.executable, 'argv[0]', sys.argv[0], sys.argv[1], tmp.name)
def cleanup(master, slave, filename):
tmp = open(filename)
total = [1, 0]
p = subprocess.Popen(['wc', '-l', filename], stdout=subprocess.PIPE)
total[0] = int(p.communicate()[0].strip().split()[0])
def file_keys():
while 1:
buffer = []
for _ in xrange(CHUNK):
line = tmp.readline()
if line:
buffer.append(line.strip())
else:
yield buffer
return
yield buffer
num = 0
for ks in file_keys():
pipe = slave.pipeline()
for k in ks:
pipe.scard(k)
try:
drop = [k for k, size in zip(ks, pipe.execute())
if 0 < size < MIN or size > MAX]
except RedisError:
continue
num += len(ks)
percent = round(float(num) / total[0] * 100, 1) if total[0] else 0
total[1] += len(drop)
log.debug('[%s %.1f%%] Dropping %s keys.' % (num, percent, len(drop)))
pipe = master.pipeline()
for k in drop:
pipe.expire(k, EXPIRE)
try:
pipe.execute()
except RedisError:
continue
time.sleep(1) # Poor man's rate limiting.
if total[0]:
log.info('Dropped %s keys [%.1f%%].' %
(total[1], round(float(total[1]) / total[0] * 100, 1)))
class Command(BaseCommand):
help = "Clean up the redis used by cache machine."
def handle(self, *args, **kw):
try:
master = redisutils.connections['cache']
slave = redisutils.connections['cache_slave']
except Exception:
log.error('Could not connect to redis.', exc_info=True)
return
if args:
filename = args[0]
try:
cleanup(master, slave, filename)
finally:
os.unlink(filename)
else:
vacuum(master, slave)
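# Hedged usage note: the command is normally run without arguments
# ("./manage.py clean_redis"), which calls vacuum(); vacuum() dumps the keys to
# a temp file and re-execs the same command with that filename, so cleanup()
# runs in a fresh process and the OS reclaims the memory held by slave.keys().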
| {
"content_hash": "a6e361d96be9c1c3b9d2e00d8cb9e079",
"timestamp": "",
"source": "github",
"line_count": 118,
"max_line_length": 78,
"avg_line_length": 28.02542372881356,
"alnum_prop": 0.5400665255518597,
"repo_name": "jbalogh/zamboni",
"id": "2e83b6b8378d803dcf9bc6b2f43163d139739a10",
"size": "3307",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "apps/amo/management/commands/clean_redis.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "4145"
},
{
"name": "JavaScript",
"bytes": "1553612"
},
{
"name": "Python",
"bytes": "2860649"
},
{
"name": "Shell",
"bytes": "8095"
}
],
"symlink_target": ""
} |
from decouple import config
SECRET_KEY = "hi"
DEBUG = config('DEBUG', default=False, cast=bool)
ALLOWED_HOSTS = ['*']
DATABASES = {
"default": dict(
ENGINE='django.db.backends.sqlite3',
NAME=config('DB_NAME', default=':memory:'),
)
}
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'export_action',
'tests',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
ROOT_URLCONF = 'tests.urls'
STATIC_URL = '/static/'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
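# Hedged note: config() comes from python-decouple, so DEBUG and DB_NAME can be
# supplied through environment variables or a .env file; unset values fall back
# to the defaults declared above. A hypothetical .env for local runs:
#
#   DEBUG=True
#   DB_NAME=test.sqlite3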
| {
"content_hash": "45872a9aabaaf662a4ab3e33660f184d",
"timestamp": "",
"source": "github",
"line_count": 52,
"max_line_length": 70,
"avg_line_length": 25.942307692307693,
"alnum_prop": 0.6286137879911046,
"repo_name": "fgmacedo/django-export-action",
"id": "1952a9a3a7baa0d684a73f71b9404d5187d8394b",
"size": "1374",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "tests/settings.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "4610"
},
{
"name": "Makefile",
"bytes": "1589"
},
{
"name": "Python",
"bytes": "28942"
}
],
"symlink_target": ""
} |
"""
NAME
xpeem_magic.py
DESCRIPTION
Creates MagIC header file and convert XPEEM text files into a MagIC format measurement file.
SYNTAX
xpeem_magic.py [command line options]
OPTIONS
-h: prints the help message and quits
-ID: directory for input files, default = current directory
-WD: directory for output files, default = current directory
-specname SPECIMEN_NAME: specify the specimen full name
-specid SPECIMEN_ID: specify the specimen identifier (i.e., short name)
-spectype SPECIMEN_TYPE: specify the specimen type
-cite CITATION: specify the citation, default = This study
-specage SPECIMEN_AGE: specify the age of the specimen
    -specage1s SPECIMEN_AGE_UNCERTAINTY: specify the 1-sigma uncertainty on the age of the specimen
-datemethod DATING_METHOD: specify the dating method, default = GM-ARAR
-dateunit DATING_UNIT: specify the dating unit, default = Ga
-method METHOD: specify the experiment method, default = LP-XPEEM-3
    -sitenames SITE_NAMES: comma-delimited list of site names, corresponding to the K-T interfaces (e.g., "A,B")
    -samplenb SAMPLES_NUMBERS: comma-delimited list of the number of samples for each site, corresponding to the locations on each K-T interface (e.g., "36,36")
    -int PALEOINTENSITY: comma-delimited list of paleointensities for each site (e.g., "31,32")
    -int1s UNCERTAINTY: comma-delimited list of 1-sigma uncertainties in paleointensity for each site (e.g., "5,6")
-x X_PIXEL_SPACING: specify the x spacing of the measurement in meters, default = 9.488e-9
-y Y_PIXEL_SPACING: specify the y spacing of the measurement in meters, default = 9.709e-9
-measnum MEAS_NUMBER: specify the starting measurement number, default = 1
-expnum EXP_NUMBER: specify the starting number for labelling measurement files, default = 1
INPUT
The input text files are created from XPEEM average images.
Input file naming convention:
[1] 2-letter-identifier for the meteorite
[2] interface
[3] location (2 digit)
[4] "-"
[5] rotation (starting with "r")
[6] energy level (on/off)
[7] polarization (L/R)
Example: TeA01-r1offR.txt
Specimen = TeA01
Experiment name = TeA01-r1offR
The measurement files will be put in a directory named "measurements".
EXAMPLE
Command line for the example dataset:
python xpeem_magic.py -ID "." -WD "." -header -specname "Miles" -specid "Mi" -spectype "IIE iron meteorite" -cite "This study" -specage "4.408" -specage1s "0.009" -datemethod "GM-ARAR" -dateunit "Ga" -method "LP-XPEEM-3" -sitenames "A,B" -samplenb "36,36" -int "32,31" -int1s "5,6" -x 9.488e-9 -y 9.709e-9 -measnum 1 -expnum 1
"""
import sys,os
import numpy as np
from pmagpy import convert_2_magic as convert
def do_help():
"""
returns help string of script
"""
return __doc__
def main():
kwargs = {}
if '-h' in sys.argv:
help(__name__)
sys.exit()
if '-ID' in sys.argv:
ind=sys.argv.index('-ID')
kwargs['input_dir_path'] = sys.argv[ind+1]
else:
kwargs['input_dir_path'] = '.'
if '-WD' in sys.argv:
ind=sys.argv.index('-WD')
kwargs['output_dir_path'] = sys.argv[ind+1]
else:
kwargs['output_dir_path'] = '.'
if '-specname' in sys.argv:
ind=sys.argv.index('-specname')
kwargs['spec_name'] = sys.argv[ind+1]
if '-specid' in sys.argv:
ind=sys.argv.index('-specid')
kwargs['spec_id'] = sys.argv[ind+1]
if '-spectype' in sys.argv:
ind=sys.argv.index('-spectype')
kwargs['spec_type'] = sys.argv[ind+1]
if '-cite' in sys.argv:
ind=sys.argv.index('-cite')
kwargs['citation'] = sys.argv[ind+1]
else:
kwargs['citation'] = 'This study'
if '-specage' in sys.argv:
ind=sys.argv.index('-specage')
kwargs['spec_age'] = sys.argv[ind+1]
if '-specage1s' in sys.argv:
ind=sys.argv.index('-specage1s')
kwargs['spec_age_1s'] = sys.argv[ind+1]
if '-datemethod' in sys.argv:
ind=sys.argv.index('-datemethod')
kwargs['dating_method'] = sys.argv[ind+1]
else:
kwargs['dating_method'] = 'GM-ARAR'
if '-dateunit' in sys.argv:
ind=sys.argv.index('-dateunit')
kwargs['dating_unit'] = sys.argv[ind+1]
else:
kwargs['dating_unit'] = 'Ga'
if '-method' in sys.argv:
ind=sys.argv.index('-method')
kwargs['method'] = sys.argv[ind+1]
else:
kwargs['method'] = 'LP-XPEEM-3'
if '-sitenames' in sys.argv:
ind=sys.argv.index('-sitenames')
kwargs['sitenames'] = sys.argv[ind+1].split(",")
if '-samplenb' in sys.argv:
ind=sys.argv.index('-samplenb')
kwargs['nb_samples'] = sys.argv[ind+1].split(",")
if '-int' in sys.argv:
ind=sys.argv.index('-int')
kwargs['paleoint'] = sys.argv[ind+1].split(",")
if '-int1s' in sys.argv:
ind=sys.argv.index('-int1s')
kwargs['paleoint_1s'] = sys.argv[ind+1].split(",")
if '-x' in sys.argv:
ind=sys.argv.index('-x')
kwargs['x_spacing']=float(sys.argv[ind+1])
else:
kwargs['x_spacing']=9.488e-9
if '-y' in sys.argv:
ind=sys.argv.index('-y')
kwargs['y_spacing'] = float(sys.argv[ind+1])
else:
        kwargs['y_spacing'] = 9.709e-9
if '-measnum' in sys.argv:
ind=sys.argv.index('-measnum')
kwargs['meas_num'] = int(sys.argv[ind+1])
else:
kwargs['meas_num'] = 1
if '-expnum' in sys.argv:
ind=sys.argv.index('-expnum')
kwargs['exp_num'] = int(sys.argv[ind+1])
else:
kwargs['exp_num'] = 1
convert.xpeem(**kwargs)
if __name__ == "__main__":
main()
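# Hedged helper sketch (not called anywhere above): splitting an input file name
# into the components documented in the INPUT section, e.g. "TeA01-r1offR.txt"
# -> specimen "TeA01", rotation "r1", energy "off", polarization "R". The
# regular expression is an assumption based on that naming convention.
def parse_xpeem_filename(fname):
    import re
    m = re.match(r'^([A-Za-z]{2})([A-Za-z])(\d{2})-r(\d+)(on|off)([LR])\.txt$',
                 fname)
    if m is None:
        return None
    meteorite, interface, location, rotation, energy, polarization = m.groups()
    return {
        'specimen': meteorite + interface + location,
        'experiment': '{}{}{}-r{}{}{}'.format(meteorite, interface, location,
                                              rotation, energy, polarization),
        'rotation': 'r' + rotation,
        'energy': energy,
        'polarization': polarization,
    }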
| {
"content_hash": "2e690b678a8fbfc29c6f9e44f40d3fe2",
"timestamp": "",
"source": "github",
"line_count": 151,
"max_line_length": 330,
"avg_line_length": 38.37748344370861,
"alnum_prop": 0.6189818809318378,
"repo_name": "lfairchild/PmagPy",
"id": "d735b7cf54b5cf58ad3df498c5f8868760c09f71",
"size": "5817",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "programs/conversion_scripts/xpeem_magic.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "33903"
},
{
"name": "Inno Setup",
"bytes": "3675"
},
{
"name": "Jupyter Notebook",
"bytes": "29090864"
},
{
"name": "Python",
"bytes": "15912726"
},
{
"name": "Rich Text Format",
"bytes": "1104"
},
{
"name": "Shell",
"bytes": "9167"
},
{
"name": "TeX",
"bytes": "3146"
}
],
"symlink_target": ""
} |
"""Implementation of basic magic functions."""
import argparse
from logging import error
import io
from pprint import pformat
import sys
from warnings import warn
from traitlets.utils.importstring import import_item
from IPython.core import magic_arguments, page
from IPython.core.error import UsageError
from IPython.core.magic import Magics, magics_class, line_magic, magic_escapes
from IPython.utils.text import format_screen, dedent, indent
from IPython.testing.skipdoctest import skip_doctest
from IPython.utils.ipstruct import Struct
class MagicsDisplay(object):
def __init__(self, magics_manager, ignore=None):
self.ignore = ignore if ignore else []
self.magics_manager = magics_manager
def _lsmagic(self):
"""The main implementation of the %lsmagic"""
mesc = magic_escapes['line']
cesc = magic_escapes['cell']
mman = self.magics_manager
magics = mman.lsmagic()
out = ['Available line magics:',
mesc + (' '+mesc).join(sorted([m for m,v in magics['line'].items() if (v not in self.ignore)])),
'',
'Available cell magics:',
cesc + (' '+cesc).join(sorted([m for m,v in magics['cell'].items() if (v not in self.ignore)])),
'',
mman.auto_status()]
return '\n'.join(out)
def _repr_pretty_(self, p, cycle):
p.text(self._lsmagic())
def __str__(self):
return self._lsmagic()
def _jsonable(self):
"""turn magics dict into jsonable dict of the same structure
replaces object instances with their class names as strings
"""
magic_dict = {}
mman = self.magics_manager
magics = mman.lsmagic()
for key, subdict in magics.items():
d = {}
magic_dict[key] = d
for name, obj in subdict.items():
try:
classname = obj.__self__.__class__.__name__
except AttributeError:
classname = 'Other'
d[name] = classname
return magic_dict
def _repr_json_(self):
return self._jsonable()
@magics_class
class BasicMagics(Magics):
"""Magics that provide central IPython functionality.
These are various magics that don't fit into specific categories but that
are all part of the base 'IPython experience'."""
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-l', '--line', action='store_true',
help="""Create a line magic alias."""
)
@magic_arguments.argument(
'-c', '--cell', action='store_true',
help="""Create a cell magic alias."""
)
@magic_arguments.argument(
'name',
help="""Name of the magic to be created."""
)
@magic_arguments.argument(
'target',
help="""Name of the existing line or cell magic."""
)
@magic_arguments.argument(
'-p', '--params', default=None,
help="""Parameters passed to the magic function."""
)
@line_magic
def alias_magic(self, line=''):
"""Create an alias for an existing line or cell magic.
Examples
--------
::
In [1]: %alias_magic t timeit
Created `%t` as an alias for `%timeit`.
Created `%%t` as an alias for `%%timeit`.
In [2]: %t -n1 pass
1 loops, best of 3: 954 ns per loop
In [3]: %%t -n1
...: pass
...:
1 loops, best of 3: 954 ns per loop
In [4]: %alias_magic --cell whereami pwd
UsageError: Cell magic function `%%pwd` not found.
In [5]: %alias_magic --line whereami pwd
Created `%whereami` as an alias for `%pwd`.
In [6]: %whereami
Out[6]: u'/home/testuser'
In [7]: %alias_magic h history "-p -l 30" --line
Created `%h` as an alias for `%history -l 30`.
"""
args = magic_arguments.parse_argstring(self.alias_magic, line)
shell = self.shell
mman = self.shell.magics_manager
escs = ''.join(magic_escapes.values())
target = args.target.lstrip(escs)
name = args.name.lstrip(escs)
params = args.params
if (params and
((params.startswith('"') and params.endswith('"'))
or (params.startswith("'") and params.endswith("'")))):
params = params[1:-1]
# Find the requested magics.
m_line = shell.find_magic(target, 'line')
m_cell = shell.find_magic(target, 'cell')
if args.line and m_line is None:
raise UsageError('Line magic function `%s%s` not found.' %
(magic_escapes['line'], target))
if args.cell and m_cell is None:
raise UsageError('Cell magic function `%s%s` not found.' %
(magic_escapes['cell'], target))
# If --line and --cell are not specified, default to the ones
# that are available.
if not args.line and not args.cell:
if not m_line and not m_cell:
raise UsageError(
'No line or cell magic with name `%s` found.' % target
)
args.line = bool(m_line)
args.cell = bool(m_cell)
params_str = "" if params is None else " " + params
if args.line:
mman.register_alias(name, target, 'line', params)
print('Created `%s%s` as an alias for `%s%s%s`.' % (
magic_escapes['line'], name,
magic_escapes['line'], target, params_str))
if args.cell:
mman.register_alias(name, target, 'cell', params)
print('Created `%s%s` as an alias for `%s%s%s`.' % (
magic_escapes['cell'], name,
magic_escapes['cell'], target, params_str))
@line_magic
def lsmagic(self, parameter_s=''):
"""List currently available magic functions."""
return MagicsDisplay(self.shell.magics_manager, ignore=[])
def _magic_docs(self, brief=False, rest=False):
"""Return docstrings from magic functions."""
mman = self.shell.magics_manager
docs = mman.lsmagic_docs(brief, missing='No documentation')
if rest:
format_string = '**%s%s**::\n\n%s\n\n'
else:
format_string = '%s%s:\n%s\n'
return ''.join(
[format_string % (magic_escapes['line'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['line'].items())]
+
[format_string % (magic_escapes['cell'], fname,
indent(dedent(fndoc)))
for fname, fndoc in sorted(docs['cell'].items())]
)
@line_magic
def magic(self, parameter_s=''):
"""Print information about the magic function system.
Supported formats: -latex, -brief, -rest
"""
mode = ''
try:
mode = parameter_s.split()[0][1:]
except IndexError:
pass
brief = (mode == 'brief')
rest = (mode == 'rest')
magic_docs = self._magic_docs(brief, rest)
if mode == 'latex':
print(self.format_latex(magic_docs))
return
else:
magic_docs = format_screen(magic_docs)
out = ["""
IPython's 'magic' functions
===========================
The magic function system provides a series of functions which allow you to
control the behavior of IPython itself, plus a lot of system-type
features. There are two kinds of magics, line-oriented and cell-oriented.
Line magics are prefixed with the % character and work much like OS
command-line calls: they get as an argument the rest of the line, where
arguments are passed without parentheses or quotes. For example, this will
time the given statement::
%timeit range(1000)
Cell magics are prefixed with a double %%, and they are functions that get as
an argument not only the rest of the line, but also the lines below it in a
separate argument. These magics are called with two arguments: the rest of the
call line and the body of the cell, consisting of the lines below the first.
For example::
%%timeit x = numpy.random.randn((100, 100))
numpy.linalg.svd(x)
will time the execution of the numpy svd routine, running the assignment of x
as part of the setup phase, which is not timed.
In a line-oriented client (the terminal or Qt console IPython), starting a new
input with %% will automatically enter cell mode, and IPython will continue
reading input until a blank line is given. In the notebook, simply type the
whole cell as one entity, but keep in mind that the %% escape can only be at
the very start of the cell.
NOTE: If you have 'automagic' enabled (via the command line option or with the
%automagic function), you don't need to type in the % explicitly for line
magics; cell magics always require an explicit '%%' escape. By default,
IPython ships with automagic on, so you should only rarely need the % escape.
Example: typing '%cd mydir' (without the quotes) changes your working directory
to 'mydir', if it exists.
For a list of the available magic functions, use %lsmagic. For a description
of any of them, type %magic_name?, e.g. '%cd?'.
Currently the magic system has the following functions:""",
magic_docs,
"Summary of magic functions (from %slsmagic):" % magic_escapes['line'],
str(self.lsmagic()),
]
page.page('\n'.join(out))
@line_magic
def page(self, parameter_s=''):
"""Pretty print the object and display it through a pager.
%page [options] OBJECT
If no object is given, use _ (last output).
Options:
-r: page str(object), don't pretty-print it."""
# After a function contributed by Olivier Aubert, slightly modified.
# Process options/args
opts, args = self.parse_options(parameter_s, 'r')
raw = 'r' in opts
oname = args and args or '_'
info = self.shell._ofind(oname)
if info['found']:
txt = (raw and str or pformat)( info['obj'] )
page.page(txt)
else:
print('Object `%s` not found' % oname)
@line_magic
def pprint(self, parameter_s=''):
"""Toggle pretty printing on/off."""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.pprint = bool(1 - ptformatter.pprint)
print('Pretty printing has been turned',
['OFF','ON'][ptformatter.pprint])
@line_magic
def colors(self, parameter_s=''):
"""Switch color scheme for prompts, info system and exception handlers.
Currently implemented schemes: NoColor, Linux, LightBG.
Color scheme names are not case-sensitive.
Examples
--------
To get a plain black and white terminal::
%colors nocolor
"""
def color_switch_err(name):
warn('Error changing %s color schemes.\n%s' %
(name, sys.exc_info()[1]), stacklevel=2)
new_scheme = parameter_s.strip()
if not new_scheme:
raise UsageError(
"%colors: you must specify a color scheme. See '%colors?'")
# local shortcut
shell = self.shell
# Set shell colour scheme
try:
shell.colors = new_scheme
shell.refresh_style()
except:
color_switch_err('shell')
# Set exception colors
try:
shell.InteractiveTB.set_colors(scheme = new_scheme)
shell.SyntaxTB.set_colors(scheme = new_scheme)
except:
color_switch_err('exception')
# Set info (for 'object?') colors
if shell.color_info:
try:
shell.inspector.set_active_scheme(new_scheme)
except:
color_switch_err('object inspector')
else:
shell.inspector.set_active_scheme('NoColor')
@line_magic
def xmode(self, parameter_s=''):
"""Switch modes for the exception handlers.
Valid modes: Plain, Context, Verbose, and Minimal.
If called without arguments, acts as a toggle.
When in verbose mode the value --show (and --hide)
will respectively show (or hide) frames with ``__tracebackhide__ =
True`` value set.
"""
def xmode_switch_err(name):
warn('Error changing %s exception modes.\n%s' %
(name,sys.exc_info()[1]))
shell = self.shell
if parameter_s.strip() == "--show":
shell.InteractiveTB.skip_hidden = False
return
if parameter_s.strip() == "--hide":
shell.InteractiveTB.skip_hidden = True
return
new_mode = parameter_s.strip().capitalize()
try:
shell.InteractiveTB.set_mode(mode=new_mode)
print('Exception reporting mode:',shell.InteractiveTB.mode)
except:
xmode_switch_err('user')
@line_magic
def quickref(self, arg):
""" Show a quick reference sheet """
from IPython.core.usage import quick_reference
qr = quick_reference + self._magic_docs(brief=True)
page.page(qr)
@line_magic
def doctest_mode(self, parameter_s=''):
"""Toggle doctest mode on and off.
This mode is intended to make IPython behave as much as possible like a
plain Python shell, from the perspective of how its prompts, exceptions
and output look. This makes it easy to copy and paste parts of a
session into doctests. It does so by:
- Changing the prompts to the classic ``>>>`` ones.
- Changing the exception reporting mode to 'Plain'.
- Disabling pretty-printing of output.
Note that IPython also supports the pasting of code snippets that have
leading '>>>' and '...' prompts in them. This means that you can paste
doctests from files or docstrings (even if they have leading
whitespace), and the code will execute correctly. You can then use
'%history -t' to see the translated history; this will give you the
input after removal of all the leading prompts and whitespace, which
can be pasted back into an editor.
With these features, you can switch into this mode easily whenever you
need to do testing and changes to doctests, without having to leave
your existing IPython session.
"""
# Shorthands
shell = self.shell
meta = shell.meta
disp_formatter = self.shell.display_formatter
ptformatter = disp_formatter.formatters['text/plain']
# dstore is a data store kept in the instance metadata bag to track any
# changes we make, so we can undo them later.
dstore = meta.setdefault('doctest_mode',Struct())
save_dstore = dstore.setdefault
# save a few values we'll need to recover later
mode = save_dstore('mode',False)
save_dstore('rc_pprint',ptformatter.pprint)
save_dstore('xmode',shell.InteractiveTB.mode)
save_dstore('rc_separate_out',shell.separate_out)
save_dstore('rc_separate_out2',shell.separate_out2)
save_dstore('rc_separate_in',shell.separate_in)
save_dstore('rc_active_types',disp_formatter.active_types)
if not mode:
# turn on
# Prompt separators like plain python
shell.separate_in = ''
shell.separate_out = ''
shell.separate_out2 = ''
ptformatter.pprint = False
disp_formatter.active_types = ['text/plain']
shell.magic('xmode Plain')
else:
# turn off
shell.separate_in = dstore.rc_separate_in
shell.separate_out = dstore.rc_separate_out
shell.separate_out2 = dstore.rc_separate_out2
ptformatter.pprint = dstore.rc_pprint
disp_formatter.active_types = dstore.rc_active_types
shell.magic('xmode ' + dstore.xmode)
# mode here is the state before we switch; switch_doctest_mode takes
# the mode we're switching to.
shell.switch_doctest_mode(not mode)
# Store new mode and inform
dstore.mode = bool(not mode)
mode_label = ['OFF','ON'][dstore.mode]
print('Doctest mode is:', mode_label)
@line_magic
def gui(self, parameter_s=''):
"""Enable or disable IPython GUI event loop integration.
%gui [GUINAME]
This magic replaces IPython's threaded shells that were activated
using the (pylab/wthread/etc.) command line flags. GUI toolkits
can now be enabled at runtime and keyboard
interrupts should work without any problems. The following toolkits
are supported: wxPython, PyQt4, PyGTK, Tk and Cocoa (OSX)::
%gui wx # enable wxPython event loop integration
%gui qt4|qt # enable PyQt4 event loop integration
%gui qt5 # enable PyQt5 event loop integration
%gui gtk # enable PyGTK event loop integration
%gui gtk3 # enable Gtk3 event loop integration
%gui tk # enable Tk event loop integration
%gui osx # enable Cocoa event loop integration
# (requires %matplotlib 1.1)
%gui # disable all event loop integration
WARNING: after any of these has been called you can simply create
an application object, but DO NOT start the event loop yourself, as
we have already handled that.
"""
opts, arg = self.parse_options(parameter_s, '')
if arg=='': arg = None
try:
return self.shell.enable_gui(arg)
except Exception as e:
# print simple error message, rather than traceback if we can't
# hook up the GUI
error(str(e))
@skip_doctest
@line_magic
def precision(self, s=''):
"""Set floating point precision for pretty printing.
Can set either integer precision or a format string.
If numpy has been imported and precision is an int,
numpy display precision will also be set, via ``numpy.set_printoptions``.
If no argument is given, defaults will be restored.
Examples
--------
::
In [1]: from math import pi
In [2]: %precision 3
Out[2]: u'%.3f'
In [3]: pi
Out[3]: 3.142
In [4]: %precision %i
Out[4]: u'%i'
In [5]: pi
Out[5]: 3
In [6]: %precision %e
Out[6]: u'%e'
In [7]: pi**10
Out[7]: 9.364805e+04
In [8]: %precision
Out[8]: u'%r'
In [9]: pi**10
Out[9]: 93648.047476082982
"""
ptformatter = self.shell.display_formatter.formatters['text/plain']
ptformatter.float_precision = s
return ptformatter.float_format
@magic_arguments.magic_arguments()
@magic_arguments.argument(
'-e', '--export', action='store_true', default=False,
help=argparse.SUPPRESS
)
@magic_arguments.argument(
'filename', type=str,
help='Notebook name or filename'
)
@line_magic
def notebook(self, s):
"""Export and convert IPython notebooks.
This function can export the current IPython history to a notebook file.
For example, to export the history to "foo.ipynb" do "%notebook foo.ipynb".
The -e or --export flag is deprecated in IPython 5.2, and will be
removed in the future.
"""
args = magic_arguments.parse_argstring(self.notebook, s)
from nbformat import write, v4
cells = []
hist = list(self.shell.history_manager.get_range())
if(len(hist)<=1):
raise ValueError('History is empty, cannot export')
for session, execution_count, source in hist[:-1]:
cells.append(v4.new_code_cell(
execution_count=execution_count,
source=source
))
nb = v4.new_notebook(cells=cells)
with io.open(args.filename, 'w', encoding='utf-8') as f:
write(nb, f, version=4)
@magics_class
class AsyncMagics(BasicMagics):
@line_magic
def autoawait(self, parameter_s):
"""
        Allow changing the status of the autoawait option.
This allow you to set a specific asynchronous code runner.
If no value is passed, print the currently used asynchronous integration
and whether it is activated.
        It can take a number of values, evaluated in the following order:
- False/false/off deactivate autoawait integration
- True/true/on activate autoawait integration using configured default
loop
- asyncio/curio/trio activate autoawait integration and use integration
with said library.
        - `sync` turn on the pseudo-sync integration (mostly used for
        `IPython.embed()`, which does not run IPython with a real event loop)
        and deactivate running asynchronous code. Turning on asynchronous code
        with the pseudo-sync loop is undefined behavior and may lead IPython
        to crash.
If the passed parameter does not match any of the above and is a python
identifier, get said object from user namespace and set it as the
runner, and activate autoawait.
If the object is a fully qualified object name, attempt to import it and
set it as the runner, and activate autoawait.
The exact behavior of autoawait is experimental and subject to change
across version of IPython and Python.
"""
param = parameter_s.strip()
d = {True: "on", False: "off"}
if not param:
print("IPython autoawait is `{}`, and set to use `{}`".format(
d[self.shell.autoawait],
self.shell.loop_runner
))
return None
if param.lower() in ('false', 'off'):
self.shell.autoawait = False
return None
if param.lower() in ('true', 'on'):
self.shell.autoawait = True
return None
if param in self.shell.loop_runner_map:
self.shell.loop_runner, self.shell.autoawait = self.shell.loop_runner_map[param]
return None
if param in self.shell.user_ns :
self.shell.loop_runner = self.shell.user_ns[param]
self.shell.autoawait = True
return None
runner = import_item(param)
self.shell.loop_runner = runner
self.shell.autoawait = True
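# Hedged usage note (illustrative, based on the docstring above): typical
# invocations of the %autoawait line magic in an IPython session:
#
#   %autoawait            # report whether autoawait is on and which runner is set
#   %autoawait off        # disable autoawait
#   %autoawait asyncio    # enable autoawait using the asyncio integration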
| {
"content_hash": "2261393a59b21cf492b3cd3377040a32",
"timestamp": "",
"source": "github",
"line_count": 663,
"max_line_length": 112,
"avg_line_length": 34.716440422322776,
"alnum_prop": 0.5871746969631142,
"repo_name": "sserrot/champion_relationships",
"id": "a8feb7553865dd7daf8abd30c79575299b4c87fc",
"size": "23017",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "venv/Lib/site-packages/IPython/core/magics/basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "128"
},
{
"name": "HTML",
"bytes": "18324224"
},
{
"name": "Jupyter Notebook",
"bytes": "9131072"
},
{
"name": "Python",
"bytes": "10702"
}
],
"symlink_target": ""
} |
from setuptools import setup, find_packages
setup(
name='wikimedia_thumbor_proxy_loader',
version='0.1.1',
url='https://github.com/wikimedia/thumbor-proxy-loader',
license='MIT',
author='Gilles Dubuc, Wikimedia Foundation',
description='Thumbor proxy loader',
packages=find_packages(),
include_package_data=True,
zip_safe=False,
platforms='any',
install_requires=[
'thumbor',
],
extras_require={
'tests': [
'pyvows',
'coverage',
],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| {
"content_hash": "f3fd1b1e02001e4923e9d347ebe15f2c",
"timestamp": "",
"source": "github",
"line_count": 32,
"max_line_length": 70,
"avg_line_length": 26.90625,
"alnum_prop": 0.5900116144018583,
"repo_name": "wikimedia/thumbor-proxy-loader",
"id": "e496a4d2195877cee2c57736ec6959eae73e451c",
"size": "886",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "2358"
}
],
"symlink_target": ""
} |