repo_name | path | copies | size | content | license
---|---|---|---|---|---|
YuncyYe/ml | mlf/pocketv1.py | 1 | 3503 |
#
#pocket Algorithm
#
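# note on the idea (comment added for clarity): plain PLA keeps updating w
# whenever a sample is misclassified, so on noisy, non-separable data it never
# settles. The pocket variant additionally keeps a copy of the best w seen so
# far (the one with the fewest misclassified points), so the run always ends
# with a usable separator.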
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as stats
import random
##############################################
def sepLine(w, x):
return -((w[0]+w[1]*x)/w[2])
#end
def drawSepLine(w, minX, maxX):
sepx = range(minX, maxX)
sepy = []
for e in sepx:
tmp = sepLine(w, e)
sepy.append( tmp )
#end for
plt.plot(sepx, sepy )
#end drawSepLine
##############################################
ls=np.array([
[1.0, 0.5, 1]
,[1.5, 14.5, 1]
,[2.5, 1.5, 1]
,[2.8, 3.5, 1]
,[4.5, 13.0, 1]
,[6.0, 8.0, 1]
,[7.0, 16.0, 1] #noise data
,[8.0, 5.5, 1]
,[9.5, 7.0, 1]
,[12.0, 2.5, 1]
,[14.0, 2.0, 1]
#,[7.0, 16.0, 1] #noise data
])
rs=np.array([
[2.0, 18.0, -1]
,[3.0, 17.5, -1]
,[3.5, 0.7, -1] #noise data
,[8.0,11.5, -1]
,[8.5,13.5, -1]
,[8.5,13.0, -1]
,[9.0,15, -1]
,[12.0,20.0,-1]
,[16.0,17.0,-1]
#,[3.5, 0.7, -1] #noise data
])
##construct training data
rtd = np.concatenate((ls,rs))
minX = int(np.min(rtd[:,:1]))-3
maxX = int(np.max(rtd[:,:1]))+3
###plot the data
plt.xlim( (minX, maxX) )
plt.ylim( (np.min(rtd[:,1:2]-3), np.max(rtd[:,1:2]+3)) )
plt.plot(ls[:,:1], ls[:, 1:2], '*')
plt.plot(rs[:,:1], rs[:, 1:2], '+')
##############pla-begin
x0 = np.zeros( (len(rtd), 1) )
x0[:]=1.0
td = np.concatenate( (x0, rtd[:,:1], rtd[:,1:2], rtd[:,2:3]), 1 )
#The initial value of w. Each sample row in td ends with y, so w needs one entry fewer.
w=np.zeros( len(td[0])-1 );
#todo: we could simply initialize this to float('inf')
weighOfPocket=1000000000.0
wPocket=w
#
#iterate until all points are correct (or maxIter is reached)
#maxIter=900000
maxIter=1200000
weighOfPocketThres=0.05
curIter=0
while(curIter<maxIter):
curIter = curIter +1;
#[begin----the following is typical pla----
isModifying=False;
#check each point for w
for ti in range(len(td)):
rndIdx=random.randint(0, len(td)-1)
sample = td[rndIdx]
sx, sy = sample[:-1], sample[-1]
t = np.inner(w, sx)
ty = np.sign(t)
#print(idx, ty, sy)
if(ty!=sy):
#failed, we need to update w
w = w + sy*sx
isModifying = True
#end if
#end for
#when a full pass made no modification, w classifies every sampled point
#correctly; the original todo noted the pocket must still be updated, so
#the convergence check is deferred until after the pocket update below.
#end]
#pick up an element in sample to try to improve w
#rndIdx=random.randint(0, len(td)-1)
#sample = td[rndIdx]
#sx = sample[:len(sample)-1]; sy=sample[len(sample)-1]
#w = w + sy*sx
#score the current w over the whole training set (count the misclassified points)
weight=0.;
for idx in range(len(td)):
sample = td[idx]
sx, sy = sample[:-1], sample[-1]
t = np.inner(w, sx)
ty = np.sign(t)
#print(idx, ty, sy)
if(ty!=sy):
weight += 1.0;
#end for
#print("The curIter is ", curIter)
#print("The weighOfPocket is ", weighOfPocket)
#print("The w is ", w)
#drawSepLine(w, minX, maxX)
#if the new w beats the w in the pocket, update the pocket
if(weight<weighOfPocket):
weighOfPocket = weight
wPocket = w
#end if
if(weighOfPocket<weighOfPocketThres):
break;
#stop once a full pass made no modification (pocket already updated above)
if(isModifying==False):
break;
#end while
##############pla-end
print("The curIter is ", curIter)
print("The weighOfPocket is ", weighOfPocket)
print("The w is ", w)
#show the seperator line
drawSepLine(w, minX, maxX);
###
#In [93]: import pla
#In [94]: reload(pla)
#
if __name__ =="__main__":
pass
#end
| apache-2.0 |
3like3beer/openrevman | openrevman/control_computer/solver.py | 1 | 8426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
import collections
import pulp
from numpy import dot
from pandas import DataFrame, read_table
from scipy.sparse import csgraph
class Controls:
def __init__(self, accepted_demand: DataFrame, product_bid_prices: DataFrame, expected_revenue: float = None):
self.accepted_demand = accepted_demand
self.product_bid_prices = product_bid_prices
self.expected_revenue = expected_revenue
class Problem:
def __init__(self, demand_vector, price_vector, capacity_vector, demand_utilization_matrix, demand_profile=None):
self.demand_vector = demand_vector
self.price_vector = price_vector
self.capacity_vector = capacity_vector
self.demand_utilization_matrix = demand_utilization_matrix
self.demand_profile = demand_profile
self.demand_correlations = self.get_demand_correlations()
def get_demand_correlations(self):
return dot(self.demand_utilization_matrix, self.demand_utilization_matrix.transpose())
def get_subproblems(self, eps=0.1):
subproblems = []
labels = csgraph.connected_components(self.demand_correlations, directed=False)[1]
split_index = collections.Counter(labels).values()
prev = 0
for i in split_index:
demand_vector = self.demand_vector[prev:prev + i]
price_vector = self.price_vector[prev:prev + i]
capacity_vector = self.capacity_vector
demand_utilization_matrix = self.demand_utilization_matrix.ix[prev:prev + i, :]
demand_profile = None
if self.demand_profile is not None:
demand_profile = self.demand_profile.ix[prev:prev + i - 1, :]
subproblems.append(
Problem(demand_vector=demand_vector, price_vector=price_vector, capacity_vector=capacity_vector,
demand_utilization_matrix=demand_utilization_matrix, demand_profile=demand_profile))
prev += i
return subproblems
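# Sketch of the decomposition above (illustrative data, not from this module):
# demands that share a product fall into the same connected component of the
# correlation graph, so each component becomes one independent subproblem.
#   import numpy as np
#   from scipy.sparse import csgraph
#   correlations = np.array([[1, 1, 0], [1, 1, 0], [0, 0, 1]])
#   csgraph.connected_components(correlations, directed=False)[1]  # -> [0, 0, 1]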
class Solver:
def __init__(self, optimizer):
self.optimizer = optimizer
self.controls = None
def optimize_controls(self, problem):
self.controls = pulp_solve(problem.demand_vector, problem.price_vector, problem.capacity_vector,
problem.demand_utilization_matrix)
return self.controls
def optimize_controls_multi_period(self, problem, eps):
if problem.demand_profile.shape[1] > 1:
for period in problem.demand_profile.columns:
if self.controls:
new_control = pulp_solve(problem.demand_profile.ix[:, period], problem.price_vector,
problem.capacity_vector,
problem.demand_utilization_matrix)
if self.is_new_ctrl_more_profitable(new_control, 0.1):
self.blinde_control(new_control, eps)
else:
self.controls = self.optimize_controls(problem)
else:
self.controls = self.optimize_controls(problem)
return self.controls
def is_new_ctrl_more_profitable(self, new_control, eps):
rev1 = self.controls.expected_revenue
if new_control.expected_revenue - rev1 > rev1 * eps:
return True
return False
def blinde_control(self, new_control, eps):
self.controls.accepted_demand = self.controls.accepted_demand * eps
self.controls.product_bid_prices = self.controls.product_bid_prices / eps
self.controls.expected_revenue = new_control.expected_revenue
def to_data_frame(data):
df = DataFrame.transpose(read_table(data, delim_whitespace=True, header=None))
df.columns = [(col + 1) for col in df.columns]
return df
def to_data_frame2(data):
df = DataFrame(read_table(data, delim_whitespace=True, header=None))
return df
def create_problem_with_data(demand_data, capacity_data, demand_utilization_data, demand_profile_data=None):
demand_vector, capacity_vector, demand_profile, demand_utilization_matrix = load_data_to_df(capacity_data,
demand_data,
demand_profile_data,
demand_utilization_data)
return Problem(demand_vector.ix[:, 1], demand_vector.ix[:, 2], capacity_vector,
demand_utilization_matrix.ix[:, :],
demand_profile)
def merge_controls(controls_list):
first_time = True
for controls in controls_list:
if first_time:
accepted_demand = controls.accepted_demand
product_bid_prices = controls.product_bid_prices
expected_revenue = controls.expected_revenue
first_time = False
else:
accepted_demand = accepted_demand.append(controls.accepted_demand)
product_bid_prices = product_bid_prices.append(controls.product_bid_prices)
expected_revenue = expected_revenue + controls.expected_revenue
return Controls(accepted_demand=accepted_demand, product_bid_prices=product_bid_prices,
expected_revenue=expected_revenue)
def load_data_to_df(capacity_data, demand_data, demand_profile_data, demand_utilization_data):
demand_vector = to_data_frame(demand_data)
capacity_vector = to_data_frame(capacity_data)
demand_utilization_matrix = to_data_frame2(demand_utilization_data)
assert demand_utilization_matrix.shape[0] == demand_vector.shape[0]
assert demand_utilization_matrix.shape[1] == capacity_vector.shape[0]
if demand_profile_data:
demand_profile = to_data_frame(demand_profile_data)
assert demand_profile.shape[0] == demand_vector.shape[0]
else:
demand_profile = None
return demand_vector, capacity_vector, demand_profile, demand_utilization_matrix
def pulp_solve(demand_vector, price_vector, capacity_vector, demand_utilization_matrix):
revman = create_problem()
x = create_variables(demand_vector)
set_objective(demand_vector, price_vector, revman, x)
add_product_constraints(capacity_vector, demand_utilization_matrix, demand_vector, revman, x)
add_demand_constraints(demand_vector, revman, x)
solve_problem(revman)
accepted_demand = get_accepted_demand(x)
product_bid_prices = get_bid_prices(capacity_vector, revman)
expected_revenue = get_expected_revenue(revman)
return Controls((accepted_demand), (product_bid_prices), expected_revenue)
def solve_problem(revman):
revman.solve(pulp.PULP_CBC_CMD())
# revman.writeLP("temp.txt")
# print(pulp.LpStatus[revman.status])
def create_problem():
return pulp.LpProblem("revman", pulp.LpMaximize)
def get_expected_revenue(revman):
return pulp.value(revman.objective)
def get_accepted_demand(x):
return DataFrame({'accepted_demand': [(x[str(i)].value()) for i in x]})
def get_bid_prices(capacity_vector, revman):
bid_prices_list = [revman.constraints.get("Capa_" + str(i)).pi for (i, c) in (capacity_vector.iterrows())]
return DataFrame({'bid_prices_list': bid_prices_list})
def add_demand_constraints(demand_vector, revman, x):
for (demand_index, demand) in (demand_vector.iteritems()):
revman.addConstraint((x[str(demand_index)]) <= demand, name="Demand_" + str(demand_index))
def add_product_constraints(capacity_vector, demand_utilization_matrix, demand_vector, revman, x):
for (product_index, capacity) in (capacity_vector.iterrows()):
revman.addConstraint(pulp.lpSum(
[x[str(i)] * demand_utilization_matrix.ix[i, product_index] for (i, d) in demand_vector.iteritems()]) <=
capacity,
name="Capa_" + str(product_index))
def set_objective(demand_vector, price_vector, revman, x):
objective = pulp.LpAffineExpression([(x[str(i)], price_vector[i]) for (i, d) in demand_vector.iteritems()])
revman.setObjective(objective)
def create_variables(demand_vector):
x = dict([(str(i), pulp.LpVariable(name="x" + str(i), lowBound=0, cat=pulp.LpContinuous)) for (i, t) in
demand_vector.iteritems()])
return x
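# Minimal pulp sketch of the pattern used by the helpers above (stand-alone,
# illustrative names): maximize 3x + 2y subject to x + y <= 4, then read the
# dual of the capacity constraint the way get_bid_prices() does via .pi:
#   demo = pulp.LpProblem("demo", pulp.LpMaximize)
#   x = pulp.LpVariable("x", lowBound=0)
#   y = pulp.LpVariable("y", lowBound=0)
#   demo.setObjective(3 * x + 2 * y)
#   demo.addConstraint(x + y <= 4, name="Capa_0")
#   demo.solve(pulp.PULP_CBC_CMD())
#   demo.constraints.get("Capa_0").pi  # shadow price of the capacity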
| gpl-3.0 |
fosfataza/protwis | mutational_landscape/views.py | 1 | 34851 | from django.shortcuts import get_object_or_404, render
from django.http import HttpResponse
from django.core.cache import cache
from django.db.models import Count, Min, Sum, Avg, Q
from django.views.decorators.cache import cache_page
from protein.models import Protein, ProteinConformation, ProteinAlias, ProteinFamily, Gene, ProteinGProtein, ProteinGProteinPair
from residue.models import Residue, ResiduePositionSet, ResidueSet
from mutational_landscape.models import NaturalMutations, CancerMutations, DiseaseMutations, PTMs, NHSPrescribings
from common.diagrams_gpcr import DrawHelixBox, DrawSnakePlot
from drugs.models import Drugs
from mutation.functions import *
from mutation.models import *
from interaction.models import *
from interaction.views import ajax #import x-tal interactions
from common import definitions
from collections import OrderedDict
from common.views import AbsTargetSelection
from common.views import AbsSegmentSelection
from family.views import linear_gradient, color_dict, RGB_to_hex, hex_to_RGB
import re
import json
import numpy as np
from copy import deepcopy
from io import BytesIO
import math
import urllib
import xlsxwriter #sudo pip3 install XlsxWriter
import operator
class TargetSelection(AbsTargetSelection):
step = 1
number_of_steps = 1
filters = False
psets = False
# docs = 'mutations.html#mutation-browser'
selection_boxes = OrderedDict([
('reference', False),
('targets', True),
('segments', False),
])
buttons = {
'continue': {
'label': 'Show missense variants',
'url': '/mutational_landscape/render',
'color': 'success',
},
}
default_species = False
def render_variants(request, protein=None, family=None, download=None, receptor_class=None, gn=None, aa=None, **response_kwargs):
simple_selection = request.session.get('selection', False)
proteins = []
if protein: # if protein static page
proteins.append(Protein.objects.get(entry_name=protein.lower()))
target_type = 'protein'
# flatten the selection into individual proteins
if simple_selection:
for target in simple_selection.targets:
if target.type == 'protein':
proteins.append(target.item)
elif target.type == 'family':
target_type = 'family'
familyname = target.item
# species filter
species_list = []
for species in simple_selection.species:
species_list.append(species.item)
# annotation filter
protein_source_list = []
for protein_source in simple_selection.annotation:
protein_source_list.append(protein_source.item)
if species_list:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
species__in=(species_list),
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
else:
family_proteins = Protein.objects.filter(family__slug__startswith=target.item.slug,
source__in=(protein_source_list)).select_related('residue_numbering_scheme', 'species')
for fp in family_proteins:
proteins.append(fp)
NMs = NaturalMutations.objects.filter(Q(protein__in=proteins)).prefetch_related('residue__generic_number','residue__display_generic_number','residue__protein_segment','protein')
ptms = PTMs.objects.filter(Q(protein__in=proteins)).prefetch_related('residue')
ptms_dict = {}
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = Residue.objects.filter(protein_conformation__protein=proteins[0], generic_number__label__in=ms_label)
ms_sequence_numbers = []
for ms in ms_object:
ms_sequence_numbers.append(ms.sequence_number)
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = Residue.objects.filter(protein_conformation__protein=proteins[0], generic_number__label__in=sp_label)
sp_sequence_numbers = []
for sp in sp_object:
sp_sequence_numbers.append(sp.sequence_number)
for ptm in ptms:
ptms_dict[ptm.residue.sequence_number] = ptm.modification
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
### GET LB INTERACTION DATA
# get also ortholog proteins, which might have been crystallised to extract
# interaction data also from those
if protein:
orthologs = Protein.objects.filter(family__slug=proteins[0].family.slug, sequence_type__slug='wt')
else:
orthologs = Protein.objects.filter(family__slug__startswith=proteins[0].family.slug, sequence_type__slug='wt')
interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').all()
interaction_data = {}
for interaction in interactions:
if interaction.rotamer.residue.generic_number:
sequence_number = interaction.rotamer.residue.sequence_number
# sequence_number = lookup[interaction.rotamer.residue.generic_number.label]
label = interaction.rotamer.residue.generic_number.label
aa = interaction.rotamer.residue.amino_acid
interactiontype = interaction.interaction_type.name
if sequence_number not in interaction_data:
interaction_data[sequence_number] = []
if interactiontype not in interaction_data[sequence_number]:
interaction_data[sequence_number].append(interactiontype)
if target_type == 'family':
pc = ProteinConformation.objects.get(protein__family__name=familyname, protein__sequence_type__slug='consensus')
residuelist = Residue.objects.filter(protein_conformation=pc).order_by('sequence_number').prefetch_related('protein_segment', 'generic_number', 'display_generic_number')
else:
residuelist = Residue.objects.filter(protein_conformation__protein=proteins[0]).prefetch_related('protein_segment', 'display_generic_number', 'generic_number')
jsondata = {}
for NM in NMs:
functional_annotation = ''
SN = NM.residue.sequence_number
if NM.residue.generic_number:
GN = NM.residue.generic_number.label
else:
GN = ''
if SN in sp_sequence_numbers:
functional_annotation += 'SodiumPocket '
if SN in ms_sequence_numbers:
functional_annotation += 'MicroSwitch '
if SN in ptms_dict:
functional_annotation += 'PTM (' + ptms_dict[SN] + ') '
if SN in interaction_data:
functional_annotation += 'LB (' + ', '.join(interaction_data[SN]) + ') '
if GN in gprotein_generic_set:
functional_annotation += 'GP (contact) '
ms_type = NM.type
if ms_type == 'missense':
effect = 'deleterious' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else 'tolerated'
color = '#e30e0e' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else '#70c070'
else:
effect = 'deleterious'
color = '#575c9d'
# note: only the last mutation at a given position survives here, since jsondata[SN] is overwritten
NM.functional_annotation = functional_annotation
# print(NM.functional_annotation)
jsondata[SN] = [NM.amino_acid, NM.allele_frequency, NM.allele_count, NM.allele_number, NM.number_homozygotes, NM.type, effect, color, functional_annotation]
natural_mutation_list = {}
max_snp_pos = 1
for NM in NMs:
if NM.residue.generic_number:
if NM.residue.generic_number.label in natural_mutation_list:
natural_mutation_list[NM.residue.generic_number.label]['val'] += 1
if not str(NM.amino_acid) in natural_mutation_list[NM.residue.generic_number.label]['AA']:
natural_mutation_list[NM.residue.generic_number.label]['AA'] = natural_mutation_list[NM.residue.generic_number.label]['AA'] + str(NM.amino_acid) + ' '
if natural_mutation_list[NM.residue.generic_number.label]['val'] > max_snp_pos:
max_snp_pos = natural_mutation_list[NM.residue.generic_number.label]['val']
else:
natural_mutation_list[NM.residue.generic_number.label] = {'val':1, 'AA': NM.amino_acid + ' '}
jsondata_natural_mutations = {}
for r in residuelist:
if r.generic_number:
if r.generic_number.label in natural_mutation_list:
jsondata_natural_mutations[r.sequence_number] = natural_mutation_list[r.generic_number.label]
jsondata_natural_mutations['color'] = linear_gradient(start_hex="#c79494", finish_hex="#c40100", n=max_snp_pos)
# jsondata_cancer_mutations['color'] = linear_gradient(start_hex="#d8baff", finish_hex="#422d65", n=max_cancer_pos)
# jsondata_disease_mutations['color'] = linear_gradient(start_hex="#ffa1b1", finish_hex="#6e000b", n=max_disease_pos)
#
SnakePlot = DrawSnakePlot(residuelist, "Class A", protein, nobuttons=1)
HelixBox = DrawHelixBox(residuelist, 'Class A', protein, nobuttons=1)
# EXCEL TABLE EXPORT
if download:
data = []
for r in NMs:
values = r.__dict__
data.append(values)
headers = ['type', 'amino_acid', 'allele_count', 'allele_number', 'allele_frequency', 'polyphen_score', 'sift_score', 'number_homozygotes', 'functional_annotation']
# EXCEL SOLUTION
output = BytesIO()
workbook = xlsxwriter.Workbook(output)
worksheet = workbook.add_worksheet()
col = 0
for h in headers:
worksheet.write(0, col, h)
col += 1
row = 1
for d in data:
col = 0
for h in headers:
worksheet.write(row, col, str(d[h]))
col += 1
row += 1
workbook.close()
output.seek(0)
xlsx_data = output.read()
response = HttpResponse(xlsx_data, content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
response['Content-Disposition'] = 'attachment; filename=GPCRdb_' + proteins[0].entry_name + '_variant_data.xlsx'
return response
return render(request, 'browser.html', {'mutations': NMs, 'type': target_type, 'HelixBox': HelixBox, 'SnakePlot': SnakePlot, 'receptor': str(proteins[0].entry_name), 'mutations_pos_list': json.dumps(jsondata), 'natural_mutations_pos_list': json.dumps(jsondata_natural_mutations)})
def ajaxNaturalMutation(request, slug, **response_kwargs):
name_of_cache = 'ajaxNaturalMutation_'+slug
ptms = PTMs.objects.filter(protein__entry_name=slug).prefetch_related('residue')
ptms_dict = {}
for ptm in ptms:
ptms_dict[ptm.residue.sequence_number] = ptm.modification
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=ms_label)
ms_sequence_numbers = []
for ms in ms_object:
ms_sequence_numbers.append(ms.sequence_number)
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = Residue.objects.filter(protein_conformation__protein__entry_name=slug, generic_number__label__in=sp_label)
sp_sequence_numbers = []
for sp in sp_object:
sp_sequence_numbers.append(sp.sequence_number)
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
### GET LB INTERACTION DATA
# get also ortholog proteins, which might have been crystallised to extract
# interaction data also from those
p = Protein.objects.get(entry_name=slug)
orthologs = Protein.objects.filter(family__slug__startswith=p.family.slug, sequence_type__slug='wt')
interactions = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').order_by('rotamer__residue__sequence_number')
interaction_data = {}
for interaction in interactions:
if interaction.rotamer.residue.generic_number:
sequence_number = interaction.rotamer.residue.sequence_number
# sequence_number = lookup[interaction.rotamer.residue.generic_number.label]
label = interaction.rotamer.residue.generic_number.label
aa = interaction.rotamer.residue.amino_acid
interactiontype = interaction.interaction_type.name
if sequence_number not in interaction_data:
interaction_data[sequence_number] = []
if interactiontype not in interaction_data[sequence_number]:
interaction_data[sequence_number].append(interactiontype)
jsondata = cache.get(name_of_cache)
if jsondata is None:
jsondata = {}
NMs = NaturalMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
for NM in NMs:
SN = NM.residue.sequence_number
type = NM.type
if type == 'missense':
effect = 'deleterious' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else 'tolerated'
color = '#e30e0e' if NM.sift_score <= 0.05 or NM.polyphen_score >= 0.1 else '#70c070'
else:
effect = 'deleterious'
color = '#575c9d'
functional_annotation = ''
SN = NM.residue.sequence_number
if NM.residue.generic_number:
GN = NM.residue.generic_number.label
else:
GN = ''
if SN in sp_sequence_numbers:
functional_annotation += 'SodiumPocket '
if SN in ms_sequence_numbers:
functional_annotation += 'MicroSwitch '
if SN in ptms_dict:
functional_annotation += 'PTM (' + ptms_dict[SN] + ') '
if SN in interaction_data:
functional_annotation += 'LB (' + ', '.join(interaction_data[SN]) + ') '
if GN in gprotein_generic_set:
functional_annotation += 'GP (contact) '
if functional_annotation == '':
functional_annotation = '-'
# note: only the last mutation at a given position survives here, since jsondata[SN] is overwritten
jsondata[SN] = [NM.amino_acid, NM.allele_frequency, NM.allele_count, NM.allele_number, NM.number_homozygotes, NM.type, effect, color, functional_annotation]
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
cache.set(name_of_cache, jsondata, 20) # 20 s for development; use 60*60*24*2 for a two-day timeout
return HttpResponse(jsondata, **response_kwargs)
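# The view above is a cache-aside pattern: try the cache first, rebuild the
# JSON payload on a miss, store it with a short TTL, then respond.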
def ajaxPTMs(request, slug, **response_kwargs):
name_of_cache = 'ajaxPTMs_'+slug
jsondata = cache.get(name_of_cache)
if jsondata is None:
jsondata = {}
NMs = PTMs.objects.filter(protein__entry_name=slug).prefetch_related('residue')
for NM in NMs:
SN = NM.residue.sequence_number
mod = NM.modification
jsondata[SN] = [mod]
jsondata = json.dumps(jsondata)
response_kwargs['content_type'] = 'application/json'
cache.set(name_of_cache, jsondata, 20) # 20 s for development; use 60*60*24*2 for a two-day timeout
return HttpResponse(jsondata, **response_kwargs)
# def ajaxCancerMutation(request, slug, **response_kwargs):
#
# name_of_cache = 'ajaxCancerMutation_'+slug
#
# jsondata = cache.get(name_of_cache)
#
# if jsondata == None:
# jsondata = {}
#
# CMs = CancerMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
#
# for CM in CMs:
# SN = CM.residue.sequence_number
# jsondata[SN] = [CM.amino_acid]
#
# jsondata = json.dumps(jsondata)
# response_kwargs['content_type'] = 'application/json'
#
# cache.set(name_of_cache, jsondata, 20) #two days timeout on cache
#
# return HttpResponse(jsondata, **response_kwargs)
#
# def ajaxDiseaseMutation(request, slug, **response_kwargs):
#
# name_of_cache = 'ajaxDiseaseMutation_'+slug
#
# jsondata = cache.get(name_of_cache)
#
# if jsondata == None:
# jsondata = {}
#
# DMs = DiseaseMutations.objects.filter(protein__entry_name=slug).prefetch_related('residue')
#
# for DM in DMs:
# SN = DM.residue.sequence_number
# jsondata[SN] = [DM.amino_acid]
#
# jsondata = json.dumps(jsondata)
# response_kwargs['content_type'] = 'application/json'
#
# cache.set(name_of_cache, jsondata, 20) #two days timeout on cache
#
# return HttpResponse(jsondata, **response_kwargs)
def mutant_extract(request):
import pandas as pd
mutations = MutationExperiment.objects.all().prefetch_related('residue__display_generic_number','protein__family','exp_func','exp_type','ligand','ligand_role','refs','mutation')
# mutations = MutationExperiment.objects.filter(protein__entry_name__startswith=slug_without_species).order_by('residue__sequence_number').prefetch_related('residue')
temp = pd.DataFrame(columns=['EntryName','Family','LigandType','Class','SequenceNumber','GPCRdb','Segment','WTaa','Mutantaa','foldchange','Ligand','LigandRole','ExpQual','ExpWTValue','ExpWTVUnit','ExpMutantValue','ExpMutantSign','ExpType','ExpFunction'])
row = 0
for mutation in mutations:
if mutation.ligand:
ligand = mutation.ligand.name
else:
ligand = 'NaN'
if mutation.exp_qual:
qual = mutation.exp_qual.qual
else:
qual = 'NaN'
if mutation.exp_func_id:
func = mutation.exp_func.func
else:
func = 'NaN'
if mutation.ligand_role_id:
lrole = mutation.ligand_role.name
else:
lrole = 'NaN'
if mutation.exp_type_id:
etype = mutation.exp_type.type
else:
etype = 'NaN'
if mutation.residue.display_generic_number:
gpcrdb = mutation.residue.display_generic_number.label
else:
gpcrdb = 'NaN'
if mutation.foldchange != 0:
# print(mutation.protein.entry_name, mutation.residue.sequence_number, mutation.residue.amino_acid, mutation.mutation.amino_acid, mutation.foldchange,ligand, lrole,qual,mutation.wt_value, mutation.wt_unit, mutation.mu_value, mutation.mu_sign, etype, func)
temp.loc[row] = pd.Series({'EntryName': mutation.protein.entry_name, 'Family': mutation.protein.family.parent.name,'LigandType': mutation.protein.family.parent.parent.name,'Class': mutation.protein.family.parent.parent.parent.name, 'SequenceNumber': int(mutation.residue.sequence_number), 'GPCRdb': gpcrdb, 'Segment': mutation.residue.protein_segment.slug,'WTaa': mutation.residue.amino_acid, 'Mutantaa': mutation.mutation.amino_acid, 'foldchange': mutation.foldchange, 'Ligand': ligand, 'LigandRole': lrole, 'ExpQual': qual, 'ExpWTValue': mutation.wt_value, 'ExpWTVUnit': mutation.wt_unit, 'ExpMutantValue': mutation.mu_value, 'ExpMutantSign': mutation.mu_sign, 'ExpType': etype, 'ExpFunction': func})
row += 1
if row % 200 == 0 and row != 0:
print(row)
temp.to_csv('170125_GPCRdb_mutation.csv')
# jsondata[mutation.residue.sequence_number].append([mutation.foldchange,ligand,qual])
# print(jsondata)
@cache_page(60*60*24*21)
def statistics(request):
context = dict()
families = ProteinFamily.objects.all()
lookup = {}
for f in families:
lookup[f.slug] = f.name.replace("receptors","").replace(" receptor","").replace(" hormone","").replace("/neuropeptide","/").replace(" (G protein-coupled)","").replace(" factor","").replace(" (LPA)","").replace(" (S1P)","").replace("GPR18, GPR55 and GPR119","GPR18/55/119").replace("-releasing","").replace(" peptide","").replace(" and oxytocin","/Oxytocin").replace("Adhesion class orphans","Adhesion orphans").replace("muscarinic","musc.").replace("-concentrating","-conc.")
class_proteins = Protein.objects.filter(family__slug__startswith="00",source__name='SWISSPROT', species_id=1).prefetch_related('family').order_by('family__slug')
temp = OrderedDict([
('name',''),
('number_of_variants', 0),
('number_of_children', 0),
('receptor_t',0),
('density_of_variants', 0),
('children', OrderedDict())
])
coverage = OrderedDict()
# Make the scaffold
for p in class_proteins:
#print(p,p.family.slug)
fid = p.family.slug.split("_")
if fid[0] not in coverage:
coverage[fid[0]] = deepcopy(temp)
coverage[fid[0]]['name'] = lookup[fid[0]]
if fid[1] not in coverage[fid[0]]['children']:
coverage[fid[0]]['children'][fid[1]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['name'] = lookup[fid[0]+"_"+fid[1]]
if fid[2] not in coverage[fid[0]]['children'][fid[1]]['children']:
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['name'] = lookup[fid[0]+"_"+fid[1]+"_"+fid[2]][:28]
if fid[3] not in coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children']:
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]] = deepcopy(temp)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['name'] = p.entry_name.split("_")[0] #[:10]
coverage[fid[0]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['receptor_t'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['receptor_t'] = 1
## POPULATE WITH DATA
variants_target = Protein.objects.filter(family__slug__startswith="00", entry_name__icontains='_human').values('family_id__slug').annotate(value=Count('naturalmutations__residue_id', distinct = True))
protein_lengths = Protein.objects.filter(family__slug__startswith="00", entry_name__icontains='_human').values('family_id__slug','sequence')
protein_lengths_dict = {}
for i in protein_lengths:
protein_lengths_dict[i['family_id__slug']] = i['sequence']
for i in variants_target:
# print(i)
fid = i['family_id__slug'].split("_")
coverage[fid[0]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['number_of_variants'] += i['value']
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['number_of_variants'] += i['value']
density = float(i['value'])/len(protein_lengths_dict[i['family_id__slug']])
coverage[fid[0]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['density_of_variants'] += round(density,2)
coverage[fid[0]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['number_of_children'] += 1
coverage[fid[0]]['children'][fid[1]]['children'][fid[2]]['children'][fid[3]]['number_of_children'] += 1
# MAKE THE TREE
tree = OrderedDict({'name':'GPCRs','children':[]})
i = 0
n = 0
for c,c_v in coverage.items():
c_v['name'] = c_v['name'].split("(")[0]
if c_v['name'].strip() in ['Other GPCRs']:
# i += 1
continue
# pass
children = []
for lt,lt_v in c_v['children'].items():
if lt_v['name'].strip() == 'Orphan' and c_v['name'].strip()=="Class A":
continue
children_rf = []
for rf,rf_v in lt_v['children'].items():
rf_v['name'] = rf_v['name'].split("<")[0]
children_r = []
for r,r_v in rf_v['children'].items():
r_v['sort'] = n
children_r.append(r_v)
n += 1
rf_v['children'] = children_r
rf_v['sort'] = n
children_rf.append(rf_v)
lt_v['children'] = children_rf
lt_v['sort'] = n
children.append(lt_v)
c_v['children'] = children
c_v['sort'] = n
tree['children'].append(c_v)
#tree = c_v
#break
i += 1
context['tree'] = json.dumps(tree)
## Overview statistics
total_receptors = NaturalMutations.objects.filter(type='missense').values('protein_id').distinct().count()
total_mv = len(NaturalMutations.objects.filter(type='missense'))
total_lof = len(NaturalMutations.objects.exclude(type='missense'))
total_av_rv = round(len(NaturalMutations.objects.filter(type='missense', allele_frequency__lt=0.001))/ total_receptors,1)
total_av_cv = round(len(NaturalMutations.objects.filter(type='missense', allele_frequency__gte=0.001))/ total_receptors,1)
context['stats'] = {'total_mv':total_mv,'total_lof':total_lof,'total_av_rv':total_av_rv, 'total_av_cv':total_av_cv}
return render(request, 'variation_statistics.html', context)
def get_functional_sites(protein):
## PTMs
ptms = list(PTMs.objects.filter(protein=protein).values_list('residue', flat=True).distinct())
## MICROSWITCHES
micro_switches_rset = ResiduePositionSet.objects.get(name="Microswitches")
ms_label = []
for residue in micro_switches_rset.residue_position.all():
ms_label.append(residue.label)
ms_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=ms_label).values_list('id', flat=True).distinct())
## SODIUM POCKET
sodium_pocket_rset = ResiduePositionSet.objects.get(name="Sodium pocket")
sp_label = []
for residue in sodium_pocket_rset.residue_position.all():
sp_label.append(residue.label)
sp_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=sp_label).values_list('id', flat=True).distinct())
## G PROTEIN INTERACTION POSITIONS
# THIS SHOULD BE CLASS SPECIFIC (different set)
rset = ResiduePositionSet.objects.get(name='Signalling protein pocket')
gprotein_generic_set = []
for residue in rset.residue_position.all():
gprotein_generic_set.append(residue.label)
GP_object = list(Residue.objects.filter(protein_conformation__protein=protein, generic_number__label__in=gprotein_generic_set).values_list('id', flat=True).distinct())
### GET LB INTERACTION DATA
## get also ortholog proteins, which might have been crystallised to extract
## interaction data also from those
orthologs = Protein.objects.filter(family__slug__startswith=protein.family.slug, sequence_type__slug='wt').prefetch_related('protein__family')
interaction_residues = ResidueFragmentInteraction.objects.filter(
structure_ligand_pair__structure__protein_conformation__protein__parent__in=orthologs, structure_ligand_pair__annotated=True).exclude(interaction_type__type ='hidden').values_list('rotamer__residue_id', flat=True).distinct()
## Get variants of these known residues:
known_function_sites = set(x for l in [GP_object,sp_object,ms_object,ptms,interaction_residues] for x in l)
NMs = NaturalMutations.objects.filter(residue_id__in=known_function_sites)
return len(NMs)
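# get_functional_sites() unions residue ids from the five annotation sources
# above (PTMs, microswitches, sodium pocket, G-protein contacts, ligand
# interactions) and returns how many natural variants hit them; this is the
# "known functional" count used by economicburden() below.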
@cache_page(60*60*24*21)
def economicburden(request):
economic_data = [{'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 29574708, 'x': 'putative-homozygous'}, {'y': 186577951, 'x': 'putative-all variants'}], 'key': 'Analgesics'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 14101883, 'x': 'putative-all variants'}], 'key': 'Antidepressant Drugs'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 10637449, 'x': 'putative-all variants'}], 'key': 'Antihist, Hyposensit & Allergic Emergen'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 6633692, 'x': 'putative-all variants'}], 'key': 'Antispasmod.&Other Drgs Alt.Gut Motility'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 8575714, 'x': 'putative-homozygous'}, {'y': 27008513, 'x': 'putative-all variants'}], 'key': 'Beta-Adrenoceptor Blocking Drugs'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 10108322, 'x': 'known-all variants'}, {'y': 25187489, 'x': 'putative-homozygous'}, {'y': 89224667, 'x': 'putative-all variants'}], 'key': 'Bronchodilators'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 5466184, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 10313279, 'x': 'putative-all variants'}], 'key': 'Drugs For Genito-Urinary Disorders'}, {'values': [{'y': 13015487, 'x': 'known-homozygous'}, {'y': 44334808, 'x': 'known-all variants'}, {'y': 13015487, 'x': 'putative-homozygous'}, {'y': 45130626, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Diabetes'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 12168533, 'x': 'putative-all variants'}], 'key': "Drugs Used In Park'ism/Related Disorders"}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 28670250, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Psychoses & Rel.Disorders'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 11069531, 'x': 'putative-all variants'}], 'key': 'Drugs Used In Substance Dependence'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 8694786, 'x': 'putative-all variants'}], 'key': 'Hypothalamic&Pituitary Hormones&Antioest'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 0, 'x': 'putative-homozygous'}, {'y': 9855456, 'x': 'putative-all variants'}], 'key': 'Sex Hormones & Antag In Malig Disease'}, {'values': [{'y': 0, 'x': 'known-homozygous'}, {'y': 0, 'x': 'known-all variants'}, {'y': 7848808, 'x': 'putative-homozygous'}, {'y': 25446045, 'x': 'putative-all variants'}], 'key': 'Treatment Of Glaucoma'}, {'values': [{'y': 864112, 'x': 'known-homozygous'}, {'y': 6107013, 'x': 'known-all variants'}, {'y': 19047162, 'x': 'putative-homozygous'}, {'y': 15754588, 'x': 'putative-all variants'}], 'key': 'other'}]
### PER DRUG TABLE
## drug data
nhs_sections = NHSPrescribings.objects.all().values("drugname__name", "bnf_section").distinct()
section_dict = {}
for drug in nhs_sections:
if drug['drugname__name'] in section_dict:
section_dict[drug['drugname__name']].append(drug['bnf_section'])
else:
section_dict[drug['drugname__name']] = [drug['bnf_section']]
nhs_data = NHSPrescribings.objects.all().values('drugname__name').annotate(Avg('actual_cost'), Avg('items'), Avg('quantity'))
drug_data = []
temp = {}
for i in nhs_data:
## druginformation
drugname = i['drugname__name']
average_cost = int(i['actual_cost__avg'])
average_quantity = int(i['quantity__avg'])
average_items = int(i['items__avg'])
section = section_dict[drugname]
if average_items > 0:
item_cost= round(float(average_cost)/average_items,1)
else:
item_cost = 0
## get target information
protein_targets = Protein.objects.filter(drugs__name=drugname).distinct()
targets = [p.entry_name.split('_human')[0].upper() for p in list(protein_targets)]
known_functional = 0
for target in protein_targets:
if target.entry_name in temp:
known_functional += temp[target.entry_name]
else:
function_sites = get_functional_sites(target)
known_functional += function_sites
temp[target.entry_name] = function_sites
putative_func = len(NaturalMutations.objects.filter(Q(protein__in=protein_targets), Q(sift_score__lte=0.05) | Q(polyphen_score__gte=0.1)).annotate(count_putative_func=Count('id')))
jsondata = {'drugname':drugname, 'targets': targets, 'average_cost': average_cost, 'average_quantity': average_quantity, 'average_items':average_items, 'item_cost':item_cost, 'known_func': known_functional, 'putative_func':putative_func, 'section':section}
drug_data.append(jsondata)
return render(request, 'economicburden.html', {'data':economic_data, 'drug_data':drug_data})
| apache-2.0 |
tdgoodrich/mase | models/icse14-v5-min.py | 13 | 51518 | from __future__ import division
import sys,collections,random
sys.dont_write_bytecode = True
def shuffle(lst):
random.shuffle(lst)
return lst
class Thing():
id = -1
def __init__(i,**fields) :
i.override(fields)
i._id = Thing.id = Thing.id + 1
i.finalize()
def finalize(i): pass
def override(i,d): i.__dict__.update(d); return i
def plus(i,**d): i.override(d)
def __repr__(i):
d = i.__dict__
name = i.__class__.__name__
return name+'{'+' '.join([':%s %s' % (k,pretty(d[k]))
for k in i.show()])+ '}'
def show(i):
return [k for k in sorted(i.__dict__.keys())
if not "_" in k]
def tunings( _ = None):
return dict(
Flex= [5.07, 4.05, 3.04, 2.03, 1.01, _],
Pmat= [7.80, 6.24, 4.68, 3.12, 1.56, _],
Prec= [6.20, 4.96, 3.72, 2.48, 1.24, _],
Resl= [7.07, 5.65, 4.24, 2.83, 1.41, _],
Team= [5.48, 4.38, 3.29, 2.19, 1.01, _],
acap= [1.42, 1.19, 1.00, 0.85, 0.71, _],
aexp= [1.22, 1.10, 1.00, 0.88, 0.81, _],
cplx= [0.73, 0.87, 1.00, 1.17, 1.34, 1.74],
data= [ _, 0.90, 1.00, 1.14, 1.28, _],
docu= [0.81, 0.91, 1.00, 1.11, 1.23, _],
ltex= [1.20, 1.09, 1.00, 0.91, 0.84, _],
pcap= [1.34, 1.15, 1.00, 0.88, 0.76, _],
pcon= [1.29, 1.12, 1.00, 0.90, 0.81, _],
plex= [1.19, 1.09, 1.00, 0.91, 0.85, _],
pvol= [ _, 0.87, 1.00, 1.15, 1.30, _],
rely= [0.82, 0.92, 1.00, 1.10, 1.26, _],
ruse= [ _, 0.95, 1.00, 1.07, 1.15, 1.24],
sced= [1.43, 1.14, 1.00, 1.00, 1.00, _],
site= [1.22, 1.09, 1.00, 0.93, 0.86, 0.80],
stor= [ _, _, 1.00, 1.05, 1.17, 1.46],
time= [ _, _, 1.00, 1.11, 1.29, 1.63],
tool= [1.17, 1.09, 1.00, 0.90, 0.78, _])
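# For orientation (comment added; not part of the original table): the
# lower-case rows are COCOMO II effort multipliers and the capitalized rows
# are scale factors. In the standard COCOMO II post-architecture model they
# combine as
#   effort = A * kloc ** (B + 0.01 * sum(scale_factors)) * product(effort_multipliers)
# with published defaults A = 2.94 and B = 0.91.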
Features=dict(Sf=[ 'Prec','Flex','Resl','Team','Pmat'],
Prod=['rely','data','cplx','ruse','docu'],
Platform=['time','stor','pvol'],
Person=['acap','pcap','pcon','aexp','plex','ltex'],
Project=['tool','site','sced'])
def options():
return Thing(levels=10,samples=20,shrink=0.66,round=2,epsilon=0.00,
guesses=1000)
def has(x,lst):
try:
out=lst.index(x)
return out
except ValueError:
return None
def nasa93(opt=options(),tunings=tunings()):
vl=1;l=2;n=3;h=4;vh=5;xh=6
return Thing(
sfem=21,
kloc=22,
effort=23,
names= [
# 0..8
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
# 9 .. 17
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
# 18 .. 25
'ltex', 'tool', 'site', 'sced', 'kloc', 'effort', '?defects', '?months'],
projects=[
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,25.9,117.6,808,15.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,24.6,117.6,767,15.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,7.7,31.2,240,10.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,8.2,36,256,10.4],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,9.7,25.2,302,11.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,2.2,8.4,69,6.6],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,3.5,10.8,109,7.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,352.8,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,xh,xh,l,h,h,n,h,n,h,h,n,n,7.5,72,226,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,20,72,566,14.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,6,24,188,9.9],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,vh,n,vh,n,h,n,n,n,100,360,2832,25.2],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,vh,n,l,n,n,n,11.3,36,456,12.8],
[h,h,h,vh,n,n,l,h,n,n,n,n,h,h,h,n,h,l,vl,n,n,n,100,215,5434,30.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,20,48,626,15.1],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,n,n,n,n,vl,n,n,n,100,360,4342,28.0],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,vh,n,vh,n,h,n,n,n,150,324,4868,32.5],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,h,n,h,n,n,n,31.5,60,986,17.6],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,vh,n,h,n,n,n,15,48,470,13.6],
[h,h,h,vh,n,n,l,h,n,n,n,xh,l,h,n,n,h,n,h,n,n,n,32.5,60,1276,20.8],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,19.7,60,614,13.9],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,66.6,300,2077,21.0],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,29.5,120,920,16.0],
[h,h,h,vh,n,h,n,n,n,n,h,n,n,n,h,n,h,n,n,n,n,n,15,90,575,15.2],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,h,n,h,n,n,n,n,n,38,210,1553,21.3],
[h,h,h,vh,n,n,n,n,n,n,n,n,n,n,h,n,h,n,n,n,n,n,10,48,427,12.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,15.4,70,765,14.5],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,48.5,239,2409,21.4],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,16.3,82,810,14.8],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,12.8,62,636,13.6],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,32.6,170,1619,18.7],
[h,h,h,vh,h,n,vh,h,n,n,vh,vh,l,vh,n,n,h,l,h,n,n,l,35.5,192,1763,19.3],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,5.5,18,172,9.1],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,10.4,50,324,11.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,14,60,437,12.4],
[h,h,h,vh,n,h,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,6.5,42,290,12.0],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,13,60,683,14.8],
[h,h,h,vh,h,n,n,h,n,n,n,n,n,n,h,n,n,n,h,h,n,n,90,444,3343,26.7],
[h,h,h,vh,n,n,n,h,n,n,n,n,n,n,n,n,n,n,n,n,n,n,8,42,420,12.5],
[h,h,h,vh,n,n,n,h,n,n,h,n,n,n,n,n,n,n,n,n,n,n,16,114,887,16.4],
[h,h,h,vh,h,n,h,h,n,n,vh,h,l,h,h,n,n,l,h,n,n,l,177.9,1248,7998,31.5],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,h,n,n,n,n,n,n,n,302,2400,8543,38.4],
[h,h,h,vh,h,n,h,l,n,n,n,n,h,h,n,n,h,n,n,h,n,n,282.1,1368,9820,37.3],
[h,h,h,vh,h,h,h,l,n,n,n,n,n,h,n,n,h,n,n,n,n,n,284.7,973,8518,38.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,l,n,h,n,h,n,h,n,n,n,79,400,2327,26.9],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,h,vh,n,h,n,h,n,n,n,423,2400,18447,41.9],
[h,h,h,vh,h,n,n,n,n,n,n,n,l,h,vh,n,vh,l,h,n,n,n,190,420,5092,30.3],
[h,h,h,vh,h,n,n,h,n,n,n,h,n,h,n,n,h,n,h,n,n,n,47.5,252,2007,22.3],
[h,h,h,vh,l,vh,n,xh,n,n,h,h,l,n,n,n,h,n,n,h,n,n,21,107,1058,21.3],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,78,571.4,4815,30.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,11.4,98.8,704,15.5],
[h,h,h,vh,l,n,h,h,n,n,vh,n,n,h,h,n,h,n,h,n,n,n,19.3,155,1191,18.6],
[h,h,h,vh,l,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,101,750,4840,32.4],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,219,2120,11761,42.8],
[h,h,h,vh,l,h,n,h,n,n,h,h,l,n,n,n,h,n,n,n,n,n,50,370,2685,25.4],
[h,h,h,vh,h,vh,h,h,n,n,vh,vh,n,vh,vh,n,vh,n,h,h,n,l,227,1181,6293,33.8],
[h,h,h,vh,h,n,h,vh,n,n,n,n,l,h,vh,n,n,l,n,n,n,l,70,278,2950,20.2],
[h,h,h,vh,h,h,l,h,n,n,n,n,l,n,n,n,n,n,h,n,n,l,0.9,8.4,28,4.9],
[h,h,h,vh,l,vh,l,xh,n,n,xh,vh,l,h,h,n,vh,vl,h,n,n,n,980,4560,50961,96.4],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,vh,vh,n,n,h,h,n,n,n,350,720,8547,35.7],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,70,458,2404,27.5],
[h,h,h,vh,h,h,n,xh,n,n,h,h,l,h,n,n,n,h,h,h,n,n,271,2460,9308,43.4],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,90,162,2743,25.0],
[h,h,h,vh,n,n,n,n,n,n,n,n,l,h,h,n,h,n,h,n,n,n,40,150,1219,18.9],
[h,h,h,vh,n,h,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,137,636,4210,32.2],
[h,h,h,vh,n,h,n,h,n,n,h,n,h,h,h,n,h,n,h,n,n,n,150,882,5848,36.2],
[h,h,h,vh,n,vh,n,h,n,n,h,n,l,h,h,n,h,n,h,n,n,n,339,444,8477,45.9],
[h,h,h,vh,n,l,h,l,n,n,n,n,h,h,h,n,h,n,h,n,n,n,240,192,10313,37.1],
[h,h,h,vh,l,h,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,144,576,6129,28.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,151,432,6136,26.2],
[h,h,h,vh,l,n,l,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,34,72,1555,16.2],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,98,300,4907,24.4],
[h,h,h,vh,l,n,n,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,85,300,4256,23.2],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,20,240,813,12.8],
[h,h,h,vh,l,n,l,n,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,111,600,4511,23.5],
[h,h,h,vh,l,h,vh,h,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,162,756,7553,32.4],
[h,h,h,vh,l,h,h,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,352,1200,17597,42.9],
[h,h,h,vh,l,h,n,vh,n,n,n,vh,l,h,h,n,h,h,h,n,n,l,165,97,7867,31.5],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,60,409,2004,24.9],
[h,h,h,vh,h,h,n,vh,n,n,h,h,l,h,n,n,n,h,h,n,n,n,100,703,3340,29.6],
[h,h,h,vh,n,h,vh,vh,n,n,xh,xh,h,n,n,n,n,l,l,n,n,n,32,1350,2984,33.6],
[h,h,h,vh,h,h,h,h,n,n,vh,xh,h,h,h,n,h,h,h,n,n,n,53,480,2227,28.8],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,41,599,1594,23.0],
[h,h,h,vh,h,h,l,vh,n,n,vh,xh,l,vh,vh,n,vh,vl,vl,h,n,n,24,430,933,19.2],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,165,4178.2,6266,47.3],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,65,1772.5,2468,34.5],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,70,1645.9,2658,35.4],
[h,h,h,vh,h,vh,h,xh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,50,1924.5,2102,34.2],
[h,h,h,vh,l,vh,l,vh,n,n,vh,xh,l,h,n,n,l,vl,l,h,n,n,7.25,648,406,15.6],
[h,h,h,vh,h,vh,h,vh,n,n,xh,xh,n,h,h,n,h,h,h,n,n,n,233,8211,8848,53.1],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n,16.3,480,1253,21.5],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 6.2, 12,477,15.4],
[h,h,h,vh,n,h,n,vh,n,n,vh,vh,h,n,n,n,n,l,l,n,n,n, 3.0, 38,231,12.0],
])
def coc81(opt=options(),tunings=tunings()):
vl=1;l=2;n=3;h=4;vh=5;xh=6
return Thing(
sfem=21,
kloc=22,
effort=23,
names= [
'Prec', 'Flex', 'Resl', 'Team', 'Pmat', 'rely', 'data', 'cplx', 'ruse',
'docu', 'time', 'stor', 'pvol', 'acap', 'pcap', 'pcon', 'aexp', 'plex',
'ltex', 'tool', 'site', 'sced', 'kloc', 'effort', '?defects', '?months'],
projects=[
[h,h,h,vh,vl,l,vh,vl,n,n,n,h,h,l,l,n,l,l,n,vl,h,n,113,2040,13027,38.4],
[h,h,h,vh,vl,l,vh,l,n,n,n,h,n,n,n,n,h,h,h,vl,h,n,293,1600,25229,48.6],
[h,h,h,vh,n,n,vh,l,n,n,n,n,l,h,h,n,vh,h,h,l,h,n,132,243,3694,28.7],
[h,h,h,vh,vl,vl,vh,vl,n,n,n,n,l,l,vl,n,h,n,h,vl,h,n,60,240,5688,28.0],
[h,h,h,vh,vl,l,l,n,n,n,n,n,l,n,h,n,n,h,h,vl,h,n,16,33,970,14.3],
[h,h,h,vh,vl,vl,n,l,n,n,n,vh,n,vl,vl,n,n,h,h,vl,h,n,4,43,553,11.6],
[h,h,h,vh,n,vl,n,n,n,n,n,n,l,n,n,n,n,h,h,l,h,n,6.9,8,350,10.3],
[h,h,h,vh,vl,h,l,vh,n,n,xh,xh,vh,vh,n,n,h,vl,vl,vl,h,l,22,1075,3511,24.5],
[h,h,h,vh,n,h,l,vh,n,n,vh,vh,h,h,h,n,n,l,l,vl,h,n,30,423,1989,24.1],
[h,h,h,vh,l,vh,l,vh,n,n,h,xh,n,h,h,n,vh,h,n,vl,h,n,29,321,1496,23.2],
[h,h,h,vh,l,vh,l,vh,n,n,h,xh,n,h,h,n,vh,h,n,vl,h,n,32,218,1651,24.0],
[h,h,h,vh,n,h,l,vh,n,n,h,h,n,h,h,n,vh,n,h,vl,h,l,37,201,1783,19.1],
[h,h,h,vh,n,h,l,vh,n,n,h,h,h,vh,vh,n,n,l,n,vl,h,n,25,79,1138,18.4],
[h,h,h,vh,vl,h,l,xh,n,n,vh,xh,h,h,vh,n,n,l,l,vl,h,vl,3,60,387,9.4],
[h,h,h,vh,n,vh,l,vh,n,n,vh,h,h,h,h,n,l,vl,vl,vl,h,vl,3.9,61,276,9.5],
[h,h,h,vh,l,vh,n,vh,n,n,vh,xh,n,h,h,n,n,n,n,vl,h,n,6.1,40,390,14.9],
[h,h,h,vh,l,vh,n,vh,n,n,vh,xh,n,h,h,n,vh,n,n,vl,h,n,3.6,9,230,12.3],
[h,h,h,vh,vl,h,vh,h,n,n,vh,vh,n,h,n,n,n,n,n,vl,h,l,320,11400,34588,52.4],
[h,h,h,vh,n,h,h,n,n,n,h,vh,l,vh,n,n,h,n,n,l,h,n,1150,6600,41248,67.0],
[h,h,h,vh,vl,vh,h,vh,n,n,h,vh,h,vh,n,n,vh,l,l,vl,h,l,299,6400,30955,53.4],
[h,h,h,vh,n,n,vh,h,n,n,n,n,l,h,n,n,n,n,n,l,h,n,252,2455,11664,40.8],
[h,h,h,vh,n,h,n,n,n,n,n,h,n,h,h,n,vh,h,n,vl,h,vl,118,724,5172,21.7],
[h,h,h,vh,l,h,n,n,n,n,n,h,n,h,h,n,vh,h,n,vl,h,vl,77,539,4362,19.5],
[h,h,h,vh,n,l,n,l,n,n,n,h,n,n,n,n,vl,l,h,n,h,n,90,453,4407,27.1],
[h,h,h,vh,n,h,vh,vh,n,n,n,h,n,h,h,n,n,l,n,l,h,l,38,523,2269,20.2],
[h,h,h,vh,n,n,n,l,n,n,n,h,h,h,h,n,n,l,n,vl,h,l,48,387,2419,18.5],
[h,h,h,vh,n,h,l,h,n,n,n,vh,n,n,n,n,n,n,n,vl,h,l,9.4,88,517,12.1],
[h,h,h,vh,vl,h,h,vh,n,n,h,vh,h,h,h,n,n,l,l,vl,h,n,13,98,1473,19.6],
[h,h,h,vh,n,l,n,n,n,n,n,n,n,n,h,n,vl,n,n,l,h,vl,2.14,7.3,138,5.3],
[h,h,h,vh,n,l,n,n,n,n,n,n,n,n,h,n,vl,n,n,l,h,vl,1.98,5.9,128,5.2],
[h,h,h,vh,l,vh,h,n,n,n,n,xh,h,h,h,n,vh,l,l,vl,h,n,62,1063,3682,32.8],
[h,h,h,vh,vl,l,h,l,n,n,n,n,n,vh,n,n,vh,n,n,vl,h,n,390,702,30484,45.8],
[h,h,h,vh,n,vh,h,vh,n,n,n,xh,h,h,h,n,vh,h,n,l,h,n,42,605,1803,27.1],
[h,h,h,vh,n,h,h,n,n,n,n,n,n,n,n,n,n,n,n,vl,h,vl,23,230,1271,14.2],
[h,h,h,vh,vl,vl,l,vh,n,n,n,vh,h,n,n,n,h,l,n,vl,h,n,13,82,2250,17.2],
[h,h,h,vh,l,l,n,n,n,n,n,n,l,l,l,n,n,h,h,l,h,n,15,55,1004,15.8],
[h,h,h,vh,l,l,l,vl,n,n,n,h,n,h,h,n,vh,n,n,vl,h,n,60,47,2883,20.3],
[h,h,h,vh,n,n,n,h,n,n,n,n,l,vh,n,n,h,h,h,l,h,n,15,12,504,13.5],
[h,h,h,vh,n,n,n,h,n,n,n,n,l,vh,vh,n,vh,n,h,vl,h,n,6.2,8,197,9.6],
[h,h,h,vh,vl,n,l,vh,n,n,n,n,n,h,l,n,vh,n,n,vl,h,n,n,8,294,9.5],
[h,h,h,vh,n,l,l,n,n,n,n,n,l,n,vh,n,vh,h,h,l,h,n,5.3,6,173,8.7],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,h,n,n,n,h,h,vl,h,n,45.5,45,2645,21.0],
[h,h,h,vh,l,n,n,n,n,n,n,vh,l,h,n,n,n,h,h,vl,h,n,28.6,83,1416,18.9],
[h,h,h,vh,vl,l,n,n,n,n,n,vh,l,n,n,n,n,h,h,vl,h,n,30.6,87,2444,20.5],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,n,n,n,n,h,h,vl,h,n,35,106,2198,20.1],
[h,h,h,vh,l,l,n,n,n,n,n,h,l,n,h,n,n,h,h,vl,h,n,73,126,4188,25.1],
[h,h,h,vh,vl,vl,l,vh,n,n,n,n,l,vh,vh,n,vh,l,l,vl,h,n,23,36,2161,15.6],
[h,h,h,vh,vl,l,l,l,n,n,n,n,l,l,l,n,h,h,h,vl,h,n,464,1272,32002,53.4],
[h,h,h,vh,n,n,n,l,n,n,n,n,n,vh,vh,n,n,l,n,l,h,n,91,156,2874,22.6],
[h,h,h,vh,l,h,n,n,n,n,vh,vh,n,h,h,n,n,l,n,vl,h,n,24,176,1541,20.3],
[h,h,h,vh,vl,l,n,n,n,n,n,n,n,l,vl,n,n,n,h,vl,h,n,10,122,1225,16.2],
[h,h,h,vh,vl,l,l,l,n,n,n,h,h,n,n,n,n,l,l,vl,h,n,8.2,41,855,13.1],
[h,h,h,vh,l,l,l,h,n,n,h,vh,vh,vh,vh,n,n,l,l,vl,h,l,5.3,14,533,9.3],
[h,h,h,vh,n,n,l,n,n,n,n,h,h,n,n,n,vh,n,h,vl,h,n,4.4,20,216,10.6],
[h,h,h,vh,vl,l,l,vl,n,n,n,n,l,h,l,n,vh,h,h,vl,h,n,6.3,18,309,9.6],
[h,h,h,vh,vl,h,l,vh,n,n,vh,vh,n,h,n,n,h,l,l,vl,h,l,27,958,3203,21.1],
[h,h,h,vh,vl,n,l,h,n,n,h,vh,vh,n,n,n,n,l,l,vl,h,vl,17,237,2622,16.0],
[h,h,h,vh,n,vh,l,vh,n,n,xh,vh,n,vh,vh,n,vh,h,h,vl,h,n,25,130,813,20.9],
[h,h,h,vh,n,n,l,h,n,n,n,h,n,n,n,n,n,n,n,vl,h,n,23,70,1294,18.2],
[h,h,h,vh,vl,h,l,vh,n,n,h,h,n,h,h,n,l,l,l,vl,h,l,6.7,57,650,11.3],
[h,h,h,vh,n,n,l,h,n,n,n,n,l,h,h,n,n,h,n,vl,h,n,28,50,997,16.4],
[h,h,h,vh,n,l,l,vh,n,n,h,vh,h,n,vh,n,vh,vl,vl,vl,h,n,9.1,38,918,15.3],
[h,h,h,vh,n,n,l,h,n,n,n,n,n,vh,h,n,vh,n,n,vl,h,n,10,15,418,11.6],
])
def sdiv(lst, tiny=3,cohen=0.3,
num1=lambda x:x[0], num2=lambda x:x[1]):
"Divide lst of (num1,num2) using variance of num2."
#----------------------------------------------
class Counts(): # Add/delete counts of numbers.
def __init__(i,inits=[]):
i.zero()
for number in inits: i + number
def zero(i): i.n = i.mu = i.m2 = 0.0
def sd(i) :
if i.n < 2: return i.mu
else:
return (max(0,i.m2)*1.0/(i.n - 1))**0.5
def __add__(i,x):
i.n += 1
delta = x - i.mu
i.mu += delta/(1.0*i.n)
i.m2 += delta*(x - i.mu)
def __sub__(i,x):
if i.n < 2: return i.zero()
i.n -= 1
delta = x - i.mu
i.mu -= delta/(1.0*i.n)
i.m2 -= delta*(x - i.mu)
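# Counts maintains a running mean and sum of squared deviations in the
# style of Welford's online algorithm: for each new x,
#   mu_n = mu_{n-1} + (x - mu_{n-1}) / n
#   m2_n = m2_{n-1} + (x - mu_{n-1}) * (x - mu_n)
# so sd() = sqrt(m2 / (n - 1)) needs no second pass over the data
# (and __sub__ applies the same update in reverse).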
#----------------------------------------------
def divide(this,small): #Find best divide of 'this'
lhs,rhs = Counts(), Counts(num2(x) for x in this)
n0, least, cut = 1.0*rhs.n, rhs.sd(), None
for j,x in enumerate(this):
if lhs.n > tiny and rhs.n > tiny:
maybe= lhs.n/n0*lhs.sd()+ rhs.n/n0*rhs.sd()
if maybe < least :
if abs(lhs.mu - rhs.mu) >= small:
cut,least = j,maybe
rhs - num2(x)
lhs + num2(x)
return cut,least
#----------------------------------------------
def recurse(this, small,cuts):
cut,sd = divide(this,small)
if cut:
recurse(this[:cut], small, cuts)
recurse(this[cut:], small, cuts)
else:
cuts += [(sd * len(this)/len(lst),this)]
return cuts
#---| main |-----------------------------------
small = Counts(num2(x) for x in lst).sd()*cohen
if lst:
return recurse(sorted(lst,key=num1),small,[])
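# Usage sketch (hypothetical data): split (x, y) pairs wherever a cut lowers
# the weighted standard deviation of y. The y values jump between the third
# and fourth pair, so sdiv returns two chunks:
#   pairs = [(1, 10), (2, 12), (3, 11), (4, 100), (5, 110), (6, 105)]
#   for weighted_sd, chunk in sdiv(pairs, tiny=1):
#     print weighted_sd, chunk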
def fss(d=coc81(),want=0.25):
rank=[]
for i in range(d.sfem):
xs=sdiv(d.projects,
num1=lambda x:x[i],
num2=lambda x:x[d.effort])
xpect = sum(map(lambda x: x[0],xs))
rank += [(xpect,i)]
rank = sorted(rank)
keep = int(len(rank)*want)
doomed= map(lambda x:x[1], rank[keep:])
for project in d.projects:
for col in doomed:
project[col] = 3
return d
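# fss() is a simple feature subset selector: each column is scored by the
# y-variance expected after sdiv() splits the projects on that column (lower
# means more informative), the best `want` fraction of columns is kept, and
# every doomed column is overwritten with the nominal value 3 so it no longer
# influences downstream estimates.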
def less(d=coc81(),n=2):
skipped = 0
names0 = d.names
toUse,doomed = [],[]
for v in Features.values():
toUse += v[:n]
for n,name in enumerate(names0):
if n >= d.sfem:
break
if not has(name,toUse):
doomed += [n]
for project in d.projects:
for col in doomed:
project[col] = 3
return d
def meanr(lst):
total=n=0.00001
for x in lst:
if not x == None:
total += x
n += 1
return total/n
def tothree(lst):
below=lst[:2]
above=lst[3:]
m1 = meanr(below)
m2= meanr(above)
below = [m1 for _ in below]
above = [m2 for _ in above]
return below + [lst[2]] + above
def rr3(lst):
#return lst
r = 1
if lst[0]> 2 : r = 0
def rr1(n): return round(n,r) if n else None
tmp= tothree([rr1(x) for x in lst])
return tmp
def rr5(lst):
if lst[0] > 2:
return [6,5,4,3,2,1]
if lst[0] < 0:
return [0.8, 0.9, 1, 1.1, 1.2, 1.3]
return [1.2,1.1,1,0.9,0.8,0.7]
def rrs5(d):
for k in d: d[k] = rr5(d[k])
return d
def rrs3(d):
for k in d: d[k] = rr3(d[k])
return d
def detune(m,tun=tunings()):
def best(at,one,lst):
least,x = 100000,None
for n,item in enumerate(lst):
if item:
tmp = abs(one - item)
if tmp < least:
least = tmp
x = n
return x
def detuned(project):
for n,(name,val) in enumerate(zip(m.names,project)):
if n <= m.sfem:
project[n] = best(n,val,tun[name]) + 1
return project
m.projects = [detuned(project) for
project in m.projects]
for p in m.projects: print p
return m
#########################################
# begin code
## imports
import random,math,sys
r = random.random
any = random.choice
seed = random.seed
exp = lambda n: math.e**n
ln = lambda n: math.log(n,math.e)
g = lambda n: round(n,2)
def say(x):
sys.stdout.write(str(x))
sys.stdout.flush()
def nl(): print ""
## classes
class Score(Thing):
def finalize(i) :
i.all = []
i.residuals=[]
i.raw=[]
i.use=False
def seen(i,got,want):
i.residuals += [abs(got - want)]
i.raw += [got - want]
tmp = i.mre(got,want)
i.all += [tmp]
return tmp
def mar(i):
return median(sorted(i.residuals))
#return sum(i.residuals) / len(i.residuals)
def sanity(i,baseline):
return i.mar()*1.0/baseline
def mre(i,got,want):
return abs(got- want)*1.0/(0.001+want)
def mmre(i):
return sum(i.all)*1.0/len(i.all)
def medre(i):
return median(sorted(i.all))
def pred(i,n=30):
total = 0.0
for val in i.all:
if val <= n*0.01: total += 1
return total*1.0/len(i.all)
## low-level utils
def pretty(s):
if isinstance(s,float):
return '%.3f' % s
else: return '%s' % s
def stats(l,ordered=False):
if not ordered: l= sorted(l)
p25= l[len(l)/4]
p50= l[len(l)/2]
p75= l[len(l)*3/4]
p100= l[-1]
print p50, p75-p25, p100
## model prep
def valued(d,opt,t=tunings()):
for old in d.projects:
for i,name in enumerate(d.names):
if i <= d.sfem:
tmp = old[i]
if not isinstance(tmp,float):
tmp = old[i] - 1
old[i] = round(t[name][tmp],opt.round)
return d
####################################
def median(lst,ordered=False):
if not ordered: lst= sorted(lst)
n = len(lst)
if n==0: return 0
if n==1: return lst[0]
if n==2: return (lst[0] + lst[1])*0.5
if n % 2: return lst[n//2]
n = n//2
return (lst[n-1] + lst[n]) * 0.5
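# Quick sanity checks for median (commented out, illustration only):
# assert median([1]) == 1
# assert median([1,2]) == 1.5
# assert median([1,2,3]) == 2
# assert median([1,2,3,4]) == 2.5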
class Count:
def __init__(i,name="counter"):
i.name=name
i.lo = 10**32
i.hi= -1*10**32
i._all = []
i._also = None
def keep(i,n):
i._also= None
if n > i.hi: i.hi = n
if n < i.lo: i.lo = n
i._all += [n]
def centroid(i):return i.also().median
def all(i): return i.also().all
def also(i):
if not i._also:
i._all = sorted(i._all)
if not i._all:
i._also = Thing(all=i._all,
median=0)
else:
i._also = Thing(all=i._all,
median=median(i._all))
return i._also
def norm(i,n):
#return n
return (n - i.lo)*1.0 / (i.hi - i.lo + 0.0001)
def clone(old,data=[]):
return Model(map(lambda x: x.name,old.headers),
data)
class Model:
def __init__(i,names,data=[],indep=0):
i.indep = indep
i.headers = [Count(name) for name in names]
i._also = None
i.rows = []
for row in data: i.keep(row)
def centroid(i): return i.also().centroid
def xy(i) : return i.also().xy
def also(i):
if not i._also:
xs, ys = 0,0
for row in i.rows:
xs += row.x
ys += row.y
n = len(i.rows)+0.0001
i._also= Thing(
centroid= map(lambda x: x.centroid(),
i.headers),
xy = (xs/n, ys/n))
return i._also
def keep(i,row):
i._also = None
if isinstance(row,Row):
content=row.cells
else:
content=row
row = Row(cells=row)
for cell,header in zip(content,i.headers):
header.keep(cell)
i.rows += [row]
class Row(Thing):
def finalize(i):
i.x = i.y = 0
def xy(i,x,y):
if not i.x:
i.x, i.y = x,y
def lo(m,x) : return m.headers[x].lo
def hi(m,x) : return m.headers[x].hi
def norm(m,x,n) : return m.headers[x].norm(n)
def cosineRule(z,m,c,west,east,slots):
a = dist(m,z,west,slots)
b = dist(m,z,east,slots)
x= (a*a + c*c - b*b)/(2*c+0.00001) # cosine rule
y= max(0,a**2 - x**2)**0.5
return x,y
def fastmap(m,data,slots):
"Divide data into two using distance to two distant items."
one = any(data) # 1) pick anything
west = furthest(m,one,data,slots) # 2) west is as far as you can go from anything
east = furthest(m,west,data,slots) # 3) east is as far as you can go from west
c = dist(m,west,east,slots)
# now find everyone's distance
lst = []
for one in data:
x,y= cosineRule(one,m,c,west,east,slots)
one.xy(x,y)
lst += [(x, one)]
lst = sorted(lst)
wests,easts = [], []
cut = len(lst) // 2
cutx = lst[cut][0]
for x,one in lst:
what = wests if x <= cutx else easts
what += [one]
return wests,west, easts,east,cutx,c
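# fastmap is a linear-time heuristic: 'west' and 'east' approximate the two
# most distant rows, every other row is projected onto the west-east axis by
# the cosine rule above, and the data is split at the median projection.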
def dist(m,i,j,slots):
"Euclidean distance 0 <= d <= 1 between decisions"
d1,d2 = slots.what(i), slots.what(j)
n = len(d1)
deltas = 0
for d in range(n):
n1 = norm(m, d, d1[d])
n2 = norm(m, d, d2[d])
inc = (n1-n2)**2
deltas += inc
return deltas**0.5 / n**0.5
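# Illustrative distance check (commented out; the two-column rows are made up):
# _m = Model(["x","y"], [[0,0],[3,4]])
# _slots = Thing(what=lambda row: row.cells)
# print dist(_m, _m.rows[0], _m.rows[1], _slots)   # ~1.0, the maximum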
def furthest(m,i,all,slots,
init = 0,
better = lambda x,y: x>y):
"find which of all is furthest from 'i'"
out,d= i,init
for j in all:
if not i == j:
tmp = dist(m,i,j,slots)
if better(tmp,d): out,d = j,tmp
return out
def myCentroid(row,t):
x1,y1=row.x,row.y
out,d=None,10**32
for leaf in leaves(t):
x2,y2=leaf.m.xy()
tmp = ((x2-x1)**2 + (y2-y1)**2)**0.5
if tmp < d:
out,d=leaf,tmp
return out
def centroid2(row,t):
x1,y1=row.x,row.y
out=[]
for leaf in leaves(t):
x2,y2 = leaf.m.xy()
tmp = ((x2-x1)**2 + (y2-y1)**2)**0.5
out += [(tmp,leaf)]
out = sorted(out)
if len(out)==0:
return [(None,None),(None,None)]
if len(out) ==1:
return out[0],out[0]
else:
return out[0],out[1]
def where0(**other):
return Thing(minSize = 10, # min leaf size
depthMin= 2, # no pruning till this depth
depthMax= 10, # max tree depth
b4 = '|.. ', # indent string
verbose = False, # show trace info?
what = lambda x: x.cells
).override(other)
def where(m,data,slots=None):
slots = slots or where0()
return where1(m,data,slots,0,10**32)
def where1(m, data, slots, lvl, sd0,parent=None):
here = Thing(m=clone(m,data),
up=parent,
_west=None,_east=None,leafp=False)
def tooDeep(): return lvl > slots.depthMax
def tooFew() : return len(data) < slots.minSize
def show(suffix):
if slots.verbose:
print slots.b4*lvl + str(len(data)) + suffix
if tooDeep() or tooFew():
show(".")
here.leafp=True
else:
show("1")
wests,west, easts,east,cut,c = fastmap(m,data,slots)
here.plus(c=c, cut=cut, west=west, east=east)
sd1=Num("west",[slots.klass(w) for w in wests]).spread()
sd2=Num("east",[slots.klass(e) for e in easts]).spread()
goWest = goEast = True
if lvl > 0:
goWest = sd1 < sd0
goEast = sd2 < sd0
if goWest:
here._west = where1(m, wests, slots, lvl+1, sd1,here)
if goEast:
here._east = where1(m, easts, slots, lvl+1, sd2,here)
return here
def leaf(t,row,slots,lvl=1):
if t.leafp:
return t
else:
x,_ = cosineRule(row, t.m, t.c,t.west,t.east,slots)
return leaf(t._west if x <= t.cut else t._east,
row,slots,lvl+1)
def preOrder(t):
if t:
yield t
for kid in [t._west,t._east]:
for out in preOrder(kid):
yield out
def leaves(t):
for t1 in preOrder(t):
if t1.leafp:
yield t1
def tprint(t,lvl=0):
if t:
print '|.. '*lvl + str(len(t.m.rows)), '#'+str(t._id)
tprint(t._west,lvl+1)
tprint(t._east,lvl+1)
import sys,math,random
sys.dont_write_bytecode = True
def go(f):
"A decorator that runs code at load time."
print "\n# ---|", f.__name__,"|-----------------"
if f.__doc__: print "#", f.__doc__
f()
# random stuff
seed = random.seed
any = random.choice
# pretty-prints for list
def gs(lst) : return [g(x) for x in lst]
def g(x) : return float('%.4f' % x)
"""
### More interesting, low-level stuff
"""
def timing(f,repeats=10):
"How long does 'f' take to run?"
import time
time1 = time.clock()
for _ in range(repeats):
f()
return (time.clock() - time1)*1.0/repeats
def showd(d):
"Pretty print a dictionary."
def one(k,v):
if isinstance(v,list):
v = gs(v)
if isinstance(v,float):
return ":%s %g" % (k,v)
return ":%s %s" % (k,v)
return ' '.join([one(k,v) for k,v in
sorted(d.items())
if not "_" in k])
####################################
## high-level business knowledge
def effort(d,project, a=2.94,b=0.91):
"Primitive estimation function"
def sf(x) : return x[0].isupper()
sfs , ems = 0.0, 1.0
kloc = project[d.kloc]
i = -1
for name,val in zip(d.names,project):
i += 1
if i > d.sfem : break
if sf(name):
sfs += val
else:
ems *= val
return a*kloc**(b + 0.01*sfs) * ems
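# The function above is the COCOMO-style post-architecture form
#   effort = a * KLOC**(b + 0.01*sum(scale factors)) * product(effort multipliers)
# where a column whose name starts with an upper-case letter is treated as a
# scale factor and everything else as an effort multiplier.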
def cart(train,test,most):
from sklearn import tree
indep = map(lambda x: x[:most+1], train)
dep = map(lambda x: x[most+1], train)
t = tree.DecisionTreeRegressor(random_state=1).fit(indep,dep)
return t.predict(test[:most+1])[0]
def nc(n):
return True #say(chr(ord('a') + n))
def loo(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,
s12,s13,s14,s15,s16,s17,s18,s19,s20,s21,s22,s23,s24,s25,s26,
s27,s28,s29,s30,s31,s32,s33,s34,s35,s36,s37,s38,s39,s40,s41,
s42,s43,s44,s45,s46,s47,s48,s49,
model=nasa93,t=tunings(),opt=None,detuning=True
):
"Leave one-out"
if opt == None: opt=options()
d= model(opt)
for i,project in enumerate(d.projects):
want = project[d.effort]
them = d.projects[:i] + d.projects[i+1:]
if s15.use:
nc(15)
got15=knn(model(),them,project,opt,5); s15.seen(got15,want)
if s16.use:
nc(16)
got16=knn(model(),them,project,opt,3); s16.seen(got16,want)
if s17.use:
nc(17)
got17=knn(model(),them,project,opt,1); s17.seen(got17,want)
#say(0)
if s5.use or s7.use:
nc(5)
got5,got7 = vasil(model,them,project); s5.seen(got5,want); s7.seen(got7,want)
#say(1)
if s1.use:
nc(1)
got1 = wildGuess(d,them,opt); s1.seen(got1,want)
#say(2)
if s4.use:
nc(4)
got4 = cart(them, project,d.kloc); s4.seen(got4,want)
#say(5)
if s8.use:
nc(8)
got8 = loc(d,them,project,3); s8.seen(got8,want)
if s18.use:
nc(18)
got18 = loc(d,them,project,1); s18.seen(got18,want)
#say(6)
if s9.use or s10.use or s19.use or s20.use or s21.use or s22.use:
project1 = project[:]
project1[d.kloc]=0
them1=[]
for one in them:
tmp=one[:]
tmp[d.kloc]=0
them1 += [tmp]
if s9.use or s10.use:
nc(9)
got9,got10 = vasil(model,them1,project1);
s9.seen(got9,want); s10.seen(got10,want)
if s19.use:
nc(19)
got19=knn(model(),them1,project1,opt,5); s19.seen(got19,want)
if s20.use:
nc(20)
got20=knn(model(),them1,project1,opt,3); s20.seen(got20,want)
if s21.use:
nc(21)
got21=knn(model(),them1,project1,opt,1); s21.seen(got21,want)
if s22.use:
nc(22)
got22=cart(them1, project1,d.kloc);s22.seen(got22,want)
if s2.use or s3.use:
d= model(opt)
d = valued(d,opt)
for i,project in enumerate(d.projects):
want = project[d.effort]
them = d.projects[:i] + d.projects[i+1:]
if s2.use:
nc(2)
got2 = effort(d,project,2.94,0.91); s2.seen(got2,want)
if s3.use:
nc(3)
a,b = coconut(d,them,opt);
got3 = effort(d,project,a,b); s3.seen(got3,want)
if s11.use or s12.use:
#if not detuning: return True
t=rrs3(tunings())
d=model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
#say(7)
if s11.use:
nc(11)
got11=effort(d,project,2.94,0.91); s11.seen(got11,want)
if s12.use:
nc(12)
a,b=coconut(d,them,opt)
#say(8)
got12= effort(d,project,a,b); s12.seen(got12,want)
if s23.use or s24.use or s25.use or s26.use:
t = rrs3(tunings())
d = model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
for n,s in [(8,s23), (12,s24), (16,s25),(4,s26)]:
nc(23)
them = shuffle(them)[:n]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s27.use or s28.use or s29.use:
for n,s in [(1,s27),(2,s28),(3,s29)]:
t = rrs3(tunings())
d = model()
d = less(d,n)
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
nc(28)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s30.use or s31.use or s32.use or s33.use or s34.use or s35.use or s36.use or s37.use or s38.use or s39.use or s40.use or s41.use:
for n1,n2,s in [(0.25,4,s30),(0.25,8,s31),(0.25,12,s32),(0.25,16,s33),
(0.5, 4,s34),(0.5, 8,s35),(0.5, 12,s36),(0.5, 16,s37),
(1,4,s38),(1,8,s39),(1,12,s40),(1,16,s41)]:
t = rrs3(tunings())
d = model()
d.projects = shuffle(d.projects)[:n2]
d = fss(d,n1)
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
nc(36)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b = coconut(d,them,opt)
got = effort(d,project,a,b); s.seen(got,want)
if s13.use or s14.use:
t=rrs5(tunings())
d=model()
d = valued(d,opt,t=t)
for i,project in enumerate(d.projects):
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
#say(9)
if s13.use:
nc(13)
got13=effort(d,project,2.94,0.91); s13.seen(got13,want)
if s14.use:
nc(14)
a,b=coconut(d,them,opt)
#say("+")
got14= effort(d,project,a,b); s14.seen(got14,want)
if s42.use or s43.use or s44.use or s45.use or s46.use or s47.use or s48.use or s49.use:
n1 = 0.5
n2 = 8
for noise,(carts,cocs,nuts,nears) in [
(.25, ( s42, s44, s46, s48)),
(.5, ( s43, s45,s47, s49))
]:
t = rrs3(tunings())
d = model()
d.projects = shuffle(d.projects)[:n2]
d = fss(d,n1)
d = valued(d,opt,t=t)
for project in d.projects:
old = project[d.kloc]
new = old * ((1 - noise) + 2*noise*random.random())
project[d.kloc]= new
for i,project in enumerate(d.projects):
nc(42)
want= project[d.effort]
them= d.projects[:i] + d.projects[i+1:]
a,b=coconut(d,them,opt)
nuts.seen(effort(d,project,a,b) ,want)
carts.seen(cart(them, project,d.kloc),want)
cocs.seen(effort(d,project) ,want)
def loc(d,them,project,n):
me = project[d.kloc]
all= sorted([(abs(me-x[d.kloc]),x[d.effort]) for x in them])
one = two = three = four = five = all[0][1]
if len(them) > 1: two = all[1][1]
if len(them) > 2: three=all[2][1]
if len(them) > 3: four=all[3][1]
if len(them) > 4: five=all[4][1]
# look at that: mean works as well as triangular kernel
if n == 1 : return one
if n == 2 : return (one *2 + two*1)/3
if n == 3 : return (one*3 + two*2+ three*1)/6
if n == 4 : return (one * 4 + two * 3 + three * 2 + four * 1)/10
return (one*5 + two*4 + three*3 + four*2 + five*1)/15
# if n == 1 : return one
# if n == 2 : return (one *1 + two*1)/2
# if n == 3 : return (one*1 + two*1+ three*1)/3
# if n == 4 : return (one * 1 + two * 1 + three * 1 + four * 1)/4
# return (one*1 + two*1 + three*1 + four*1 + five*1)/5
def walk(lst):
lst = sorted([(median(x[1].all),x[0],x[1].all) for x in lst])
say( lst[0][1])
walk1(lst[0],lst[1:])
print ""
def walk1(this,those):
if those:
that=those[0]
_,n1=this[1], this[2]
w2,n2=that[1], that[2]
if mwu(n1,n2) :
say(" < "+ str(w2))
walk1(that,those[1:])
else:
say(" = " + str(w2))
walk1(("","",n1+n2),those[1:])
def a12slow(lst1,lst2,rev=True):
"how often is x in lst1 more than y in lst2?"
more = same = 0.0
for x in lst1:
for y in lst2:
if x==y : same += 1
elif rev and x > y : more += 1
elif not rev and x < y : more += 1
x= (more + 0.5*same) / (len(lst1)*len(lst2))
#if x > 0.71: return g(x),"B"
#if x > 0.64: return g(x),"M"
return x> 0.6 #g(x),"S"
def a12cmp(x,y):
if y - x > 0 : return 1
if y - x < 0 : return -1
else: return 0
a12s=0
def a12(lst1,lst2, gt= a12cmp):
"how often is x in lst1 more than y in lst2?"
global a12s
a12s += 1
def loop(t,t1,t2):
while t1.j < t1.n and t2.j < t2.n:
h1 = t1.l[t1.j]
h2 = t2.l[t2.j]
h3 = t2.l[t2.j+1] if t2.j+1 < t2.n else None
if gt(h1,h2) < 0:
t1.j += 1; t1.gt += t2.n - t2.j
elif h1 == h2:
if h3 and gt(h1,h3) < 0:
t1.gt += t2.n - t2.j - 1
t1.j += 1; t1.eq += 1; t2.eq += 1
else:
t2,t1 = t1,t2
return t.gt*1.0, t.eq*1.0
#--------------------------
lst1 = sorted(lst1, cmp=gt)
lst2 = sorted(lst2, cmp=gt)
n1 = len(lst1)
n2 = len(lst2)
t1 = Thing(l=lst1,j=0,eq=0,gt=0,n=n1)
t2 = Thing(l=lst2,j=0,eq=0,gt=0,n=n2)
gt,eq= loop(t1, t1, t2)
#print gt,eq,n1,n2
return gt/(n1*n2) + eq/2/(n1*n2)
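# Illustrative effect-size checks (commented out; the lists are made up):
# print a12([1,2,3],[1,2,3])   # 0.5 : no difference
# print a12([2,3,4],[1,2,3])   # ~0.78 : first list usually larger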
class Counts(): # Add/delete counts of numbers.
def __init__(i,inits=[]):
i.n = i.mu = i.m2 = 0.0
for number in inits: i + number
def sd(i) :
if i.n < 2: return i.mu
else:
return (i.m2*1.0/(i.n - 1))**0.5
def __add__(i,x):
i.n += 1
delta = x - i.mu
i.mu += delta/(1.0*i.n)
i.m2 += delta*(x - i.mu)
def wildGuess(d,projects,opt):
tally = 0
for _ in xrange(opt.guesses):
project = any(projects)
tally += project[d.effort]
return tally*1.0/opt.guesses
def coconut(d,tests,opt,lvl=None,err=10**6,
a=10,b=1,ar=10,br=0.5):
"Chase good a,b settings"
#return 2.94,0.91
def efforts(a,b):
s=Score()
for project in tests:
got = effort(d,project,a,b)
want = project[d.effort]
s.seen(got,want)
return s.mmre()
if lvl == None: lvl=opt.levels
if lvl < 1 : return a,b
old = err
for _ in range(opt.samples):
a1 = a - ar + 2*ar*r()
b1 = b - br + 2*br*r()
tmp = efforts(a1,b1)
if tmp < err:
a,b,err = a1,b1,tmp
if (old - err)/old < opt.epsilon:
return a,b
else:
return coconut(d,tests,opt,lvl-1,err, a=a,b=b,
ar=ar*opt.shrink,
br=br*opt.shrink)
## sample main
def main(model=nasa93):
seed(1)
for shrink in [0.66,0.5,0.33]:
for sam in [5,10,20]:
for lvl in [5,10,20]:
for rnd in [0,1,2]:
opt=options()
opt.shrink=shrink
opt.samples=sam
opt.round = rnd
opt.levels = lvl
scores = [Score() for _ in range(49)]
loo(*scores, model=model, opt=opt)
#########################################
# start up code
def mwu(l1,l2):
import numpy as np
from scipy.stats import mannwhitneyu
#print "l1>",map(g,sorted(l1))
#print "l2>",map(g,sorted(l2))
_, p_value = mannwhitneyu(np.array(l1),
np.array(l2))
return p_value <= 0.05
# for e in [1,2,4]:
# print "\n"
# l1 = [r()**e for _ in xrange(100)]
# for y in [1.01,1.1,1.2,1.3,1.4, 1.5]:
# l2 = map(lambda x: x*y,l1)
# print e,y,mwu(l1,l2)
def test1(repeats=10,models=[coc81],what='locOrNot'):
seed(1)
print repeats,what,map(lambda x:x.__name__,models)
#for m in [ newCIIdata, xyz14,nasa93,coc81]:
import time
detune=False
for m in models:
s1=Score(); s2=Score(); s3=Score(); s4=Score();
s5=Score(); s6=Score(); s7=Score(); s8=Score()
s9=Score(); s10=Score(); s11=Score(); s12=Score();
s13=Score(); s14=Score();
s15=Score(); s16=Score(); s17=Score(); s18=Score()
s19=Score(); s20=Score(); s21=Score();
s22=Score()
s23=Score()
s24=Score(); s25=Score(); s26=Score()
s27=Score(); s28=Score(); s29=Score()
s30=Score(); s31=Score(); s32=Score()
s33=Score(); s34=Score(); s35=Score()
s36=Score(); s37=Score(); s38=Score()
s39=Score(); s40=Score(); s41=Score()
s42=Score(); s43=Score(); s44=Score()
s45=Score(); s46=Score(); s47=Score()
s48=Score(); s49=Score();
# loc or no loc
exps =dict(locOrNot = [("coc2000",s2),("coconut",s3),
("loc(3)",s8), ("loc(1)",s18),
#('knear(3)',s16), ("knear(3) noloc",s20),
#('knear(1)',s17),("knear(1) noloc",s21)
],
basicRun = [("coc2000",s2),("coconut",s3),
('knear(3)',s16),('knear(1)',s17),
#("cluster(1)",s5),
("cluster(2)",s7),
("cart",s4)],
qualitative= [("coc2000",s2),("coconut",s3),
#('knear(3)',s16),('knear(1)',s17),
("coco2000(simp)",s13), ("coconut(simp)",s14),
("coco2000(lmh)",s11), ("coconut(lmh)",s12)],
other = [('(c=1)n-noloc',s9),('(c=2)n-noloc',s10)],
less = [("coc2000",s2),("coconut",s3),
("coco2000(lmh)",s11), ("coconut(lmh)",s12),
('coconut(lmh8)',s23),('coconut(lmh12)',s24),
('coconut(lmh16)',s25),
('coconut(lmh4)',s26)],
lessCols = [("coc2000",s2),("coconut",s3),
('coconut(just5)',s27),
('coconut(just10)',s28),
('coconut(just15)',s29)],
fssCols = [("coc2000",s2),("coconut",s3),
('coconut:c*0.25,r=4',s30),
('coconut:c*0.25,r=8',s31),
#('coconut:c*0.25,r=12',s32),
#('coconut:c*0.25,r=16',s33),
('coconut:c*0.5,r=4',s34),
('coconut:c*0.5,r=8',s35),
#('coconut:c*0.5,r=12',s36),
#('coconut:c*1,r=16',s37),
('coconut:c*1,r=4',s38),
('coconut:c*1,r=8',s39),
#('coconut:c*1,r=12',s40),
#('coconut:c*1,r=16',s41)
],
noise = [ ("cart",s4), ("cart/4",s42), ("cart/2",s43),
("coc2000",s2), ("coc2000n/4",s44), ("coc2000n/2",s45),
('coconut:c*0.5,r=8',s35), ('coconut:c*0.5r=8n/4',s46) , ('coconut:c*0.5,r=8n/2',s47),
('knear(1)',s17), ('knear(1)/4',s48), ('knear(1)/2',s49)
]
)
lst = exps[what]
print '%',what
for _,s in lst: s.use=True
t1=time.clock()
print "\n\\subsection{%s}" % m.__name__
say("%")
for i in range(repeats):
say(' ' + str(i))
loo(s1,s2,s3,s4,s5,s6,s7,s8,s9,s10,s11,s12,s13,
s14,s15,s16,s17,s18,s19,s20,s21,s22,s23,s24,s25,s26,
s27,s28,s29,s30,s31,s32,
s33,s34,s35,s36,s37,s38,s39,s40,s41,s42,s43,s44,s45,s46,s47,s48,s49,
model=m,detuning=detune)
global bs
global a12s
bs = a12s = 0
t2 = time.clock()
print "="
rdivDemo([[x[0]] + x[1].all for x in lst if x[1].all])
t3 = time.clock()
print "\n :learn",t2-t1,":analyze",t3-t2,":boots",bs,"effects",a12s,":conf",0.99**bs
#print 'B>', bootstrap([1,2,3,4,5],[1,2,3,4,5])
def knn(src,them,project,opt,k):
slots = where0(what= lambda x:cocVals(x,src.effort))
m0=Model(src.names,src.projects)
m1=clone(m0,them)
w = [None]*k
ws = 0
for i in range(k): ws += i+1
for i in range(k): w[i] = (i+1)/float(ws)
w.reverse()
#w = [1/k]*k
dists =[(dist(m1,Row(cells=that),Row(cells=project),slots),that[src.effort])
for that in them]
est = 0
for w1,(_,x) in zip(w,sorted(dists)[:k]):
est += w1*x
return est
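# The weights above are the normalized ranks k, k-1, ..., 1 over their sum,
# e.g. for k=3 the three nearest efforts are blended as 3/6, 2/6, 1/6.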
def cocVals(row,n):
if isinstance(row,Row):
row=row.cells
return row[:n]
def vasil(src,data,project):
all = src()
m0 = Model(all.names,all.projects)
m1 = clone(m0,data)
e = all.effort
slots = where0(what= lambda x:cocVals(x,e)
,klass=lambda x:x.cells[all.effort])
t = where(m1,m1.rows,slots)
row = Row(cells=project)
got1 = got2 = Num("effort", [slots.klass(r) for r in data]).median()
(d1,c1),(d2,c2) = centroid2(row,t)
if c1 or c2:
w1,w2 = 1/(d1+0.0001), 1/(d2+0.0001)
e1 = c1.m.centroid()[e]
e2 = c2.m.centroid()[e]
got2 = (w1*e1 + w2*e2) / (w1+w2)
got1=myCentroid(row,t).m.centroid()[e]
#got1b=leaf(t,row,slots).m.centroid()[e]
return got1,got2
class Num:
"An Accumulator for numbers"
def __init__(i,name,inits=[]):
i.n = i.m2 = i.mu = 0.0
i.all=[]
i._median=None
i.name = name
i.rank = 0
for x in inits: i.add(x)
def s(i) : return (i.m2/(i.n - 1))**0.5
def add(i,x):
i._median=None
i.n += 1
i.all += [x]
delta = x - i.mu
i.mu += delta*1.0/i.n
i.m2 += delta*(x - i.mu)
def __add__(i,j):
return Num(i.name + j.name,i.all + j.all)
def quartiles(i):
def p(x) : return int(100*g(xs[x]))
i.median()
xs = i.all
n = int(len(xs)*0.25)
return p(n) , p(2*n) , p(3*n)
def median(i):
if not i._median:
i.all = sorted(i.all)
i._median=median(i.all)
return i._median
def __lt__(i,j):
return i.median() < j.median()
def spread(i):
i.all=sorted(i.all)
n1=i.n*0.25
n2=i.n*0.75
if len(i.all) <= 1:
return 0
if len(i.all) == 2:
return i.all[1] - i.all[0]
else:
return i.all[int(n2)] - i.all[int(n1)]
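# Both Counts (inside sdiv) and Num maintain a running mean and second moment
# with Welford's online update, so sd()/s() need no second pass over the data.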
def different(l1,l2):
#return bootstrap(l1,l2) and a12(l2,l1)
return a12(l2,l1) and bootstrap(l1,l2)
def scottknott(data,cohen=0.3,small=3, useA12=False,epsilon=0.01):
"""Recursively split data, maximizing delta of
the expected value of the mean before and
after the splits.
Reject splits with under 3 items"""
#data = [d for d in data if d.spread() < 0.75]
all = reduce(lambda x,y:x+y,data)
#print sorted(all.all)
same = lambda l,r: abs(l.median() - r.median()) <= all.s()*cohen
if useA12:
same = lambda l, r: not different(l.all,r.all)
big = lambda n: n > small
return rdiv(data,all,minMu,big,same,epsilon)
def rdiv(data, # a list of class Nums
all, # all the data combined into one num
div, # function: find the best split
big, # function: rejects small splits
same, # function: rejects similar splits
epsilon): # small enough to split two parts
"""Looks for ways to split sorted data,
Recurses into each split. Assigns a 'rank' number
to all the leaf splits found in this way.
"""
def recurse(parts,all,rank=0):
"Split, then recurse on each part."
cut,left,right = maybeIgnore(div(parts,all,big,epsilon),
same,parts)
if cut:
# if cut, rank "right" higher than "left"
rank = recurse(parts[:cut],left,rank) + 1
rank = recurse(parts[cut:],right,rank)
else:
# if no cut, then all get same rank
for part in parts:
part.rank = rank
return rank
recurse(sorted(data),all)
return data
def maybeIgnore((cut,left,right), same,parts):
if cut:
if same(sum(parts[:cut],Num('upto')),
sum(parts[cut:],Num('above'))):
cut = left = right = None
return cut,left,right
def minMu(parts,all,big,epsilon):
"""Find a cut in the parts that maximizes
the expected value of the difference in
the mean before and after the cut.
Reject splits that are insignificantly
different or that generate very small subsets.
"""
cut,left,right = None,None,None
before, mu = 0, all.mu
for i,l,r in leftRight(parts,epsilon):
if big(l.n) and big(r.n):
n = all.n * 1.0
now = l.n/n*(mu- l.mu)**2 + r.n/n*(mu- r.mu)**2
if now > before:
before,cut,left,right = now,i,l,r
return cut,left,right
def leftRight(parts,epsilon=0.01):
"""Iterator. For all items in 'parts',
return everything to the left and everything
from here to the end. For reasons of
efficiency, take a first pass over the data
to pre-compute and cache right-hand-sides
"""
rights = {}
n = j = len(parts) - 1
while j > 0:
rights[j] = parts[j]
if j < n: rights[j] += rights[j+1]
j -=1
left = parts[0]
for i,one in enumerate(parts):
if i> 0:
if parts[i]._median - parts[i-1]._median > epsilon:
yield i,left,rights[i]
left += one
bs=0
def bootstrap(y0,z0,conf=0.01,b=1000):
"""The bootstrap hypothesis test from
p220 to 223 of Efron's book 'An
introduction to the bootstrap.'"""
global bs
bs += 1
class total():
"quick and dirty data collector"
def __init__(i,some=[]):
i.sum = i.n = i.mu = 0 ; i.all=[]
for one in some: i.put(one)
def put(i,x):
i.all.append(x);
i.sum +=x; i.n += 1; i.mu = float(i.sum)/i.n
def __add__(i1,i2): return total(i1.all + i2.all)
def testStatistic(y,z):
"""Checks if two means are different, tempered
by the sample size of 'y' and 'z'"""
tmp1 = tmp2 = 0
for y1 in y.all: tmp1 += (y1 - y.mu)**2
for z1 in z.all: tmp2 += (z1 - z.mu)**2
s1 = (float(tmp1)/(y.n - 1))**0.5
s2 = (float(tmp2)/(z.n - 1))**0.5
delta = z.mu - y.mu
if s1+s2:
delta = delta/((s1/y.n + s2/z.n)**0.5)
return delta
def one(lst): return lst[ int(any(len(lst))) ]
def any(n) : return random.uniform(0,n)
y, z = total(y0), total(z0)
x = y + z
tobs = testStatistic(y,z)
yhat = [y1 - y.mu + x.mu for y1 in y.all]
zhat = [z1 - z.mu + x.mu for z1 in z.all]
bigger = 0.0
for i in range(b):
if testStatistic(total([one(yhat) for _ in yhat]),
total([one(zhat) for _ in zhat])) > tobs:
bigger += 1
return bigger / b < conf
def bootstrapd():
def worker(n=30,mu1=10,sigma1=1,mu2=10.2,sigma2=1):
def g(mu,sigma) : return random.gauss(mu,sigma)
x = [g(mu1,sigma1) for i in range(n)]
y = [g(mu2,sigma2) for i in range(n)]
return n,mu1,sigma1,mu2,sigma2,\
'different' if bootstrap(x,y) else 'same'
print worker(30, 10.1, 1, 10.2, 1)
print worker(30, 10.1, 1, 10.8, 1)
print worker(30, 10.1, 10, 10.8, 1)
def rdivDemo(data,max=100):
def z(x):
return int(100 * (x - lo) / (hi - lo + 0.00001))
data = map(lambda lst:Num(lst[0],lst[1:]),
data)
print ""
ranks=[]
for x in scottknott(data,useA12=True):
ranks += [(x.rank,x.median(),x)]
all=[]
for _,__,x in sorted(ranks):
all += x.quartiles()
all = sorted(all)
lo, hi = all[0], all[-1]
print "{\\scriptsize \\begin{tabular}{l@{~~~}l@{~~~}r@{~~~}r@{~~~}c}"
print "\\arrayrulecolor{darkgray}"
print '\\rowcolor[gray]{.9} rank & treatment & median & IQR & \\\\' #min= %s, max= %s\\\\' % (int(lo),int(hi))
last = None
for _,__,x in sorted(ranks):
q1,q2,q3 = x.quartiles()
pre =""
if not last == None and not last == x.rank:
pre= "\\hline"
print pre,'%2s & %12s & %s & %s & \quart{%s}{%s}{%s}{%s} \\\\' % \
(x.rank+1, x.name, q2, q3 - q1, z(q1), z(q3) - z(q1), z(q2),z(100))
last = x.rank
print "\\end{tabular}}"
def rdiv0():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",6, 7, 8, 9] ])
def rdiv1():
rdivDemo([
["x1",0.1, 0.2, 0.3, 0.4],
["x2",0.1, 0.2, 0.3, 0.4],
["x3",6, 7, 8, 9] ])
def rdiv2():
rdivDemo([
["x1",0.34, 0.49, 0.51, 0.6],
["x2",0.6, 0.7, 0.8, 0.9],
["x3",0.15, 0.25, 0.4, 0.35],
["x4",0.6, 0.7, 0.8, 0.9],
["x5",0.1, 0.2, 0.3, 0.4] ])
def rdiv3():
rdivDemo([
["x1",101, 100, 99, 101, 99.5],
["x2",101, 100, 99, 101, 100],
["x3",101, 100, 99.5, 101, 99],
["x4",101, 100, 99, 101, 100] ])
def rdiv4():
rdivDemo([
["1",11,12,13],
["2",14,31,22],
["3",23,24,31],
["5",32,33,34]])
def rdiv5():
rdivDemo([
["1",11,11,11],
["2",11,11,11],
["3",11,11,11]])
def rdiv6():
rdivDemo([
["1",11,11,11],
["2",11,11,11],
["4",32,33,34,35]])
#rdiv0(); rdiv1(); rdiv2(); rdiv3(); rdiv4(); rdiv5(); rdiv6()
#exit()
repeats=10
exp='locOrNot'
models=['newCIIdataDeTune',
'xyz14deTune',
'coc81',
'nasa93']
if len(sys.argv)>=2:
repeats=eval(sys.argv[1])
if len(sys.argv)>=3:
exp=sys.argv[2]
if len(sys.argv)>3:
models=sys.argv[3:]
test1(repeats=repeats,models=map(eval,models),what=exp)
| unlicense |
brahmcapoor/naming-changes-complexity | analysis/subject_analysis.py | 1 | 8370 | from random import shuffle
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import csv
import argparse
import os
import shutil
"""
The script required for the data analysis. Requires several python libraries
to function but otherwise isn't too complicated.
"""
def generate_all_graphs():
number_of_subjects = len(os.listdir("../subject logs")) - 1
for i in range(1, number_of_subjects + 1):
try:
os.mkdir("Subject {}".format(i))
except FileExistsError:
shutil.rmtree("Subject {}".format(i))
os.mkdir("Subject {}".format(i))
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
individual_graph(transparency_log_1, transparency_log_2, "Easy", i,
False)
individual_graph(transparency_log_3, transparency_log_4, "Hard", i,
False)
print("Generated graphs for subject {}".format(i))
def individual_graph(transparencies_1, transparencies_2, condition,
subject_number, display_graph=True):
x = [i for i in range(1, 81)]
sns.pointplot(x, transparencies_1, color='red')
plot = sns.pointplot(x, transparencies_2)
plot.set(xlabel="Trial", ylabel="Contrast",
title="{} Condition".format(condition))
if display_graph:
plt.show()
plot = plot.get_figure()
plot.savefig("Subject {}/{}.png".format(subject_number, condition))
plt.cla()
def find_turning_points(series):
turning_points = []
last_point = len(series) - 1
for i, point in enumerate(series):
if i != 0 and i != last_point:
if (point < series[i - 1] and point < series[i + 1]) or \
(point > series[i - 1] and point > series[i + 1]):
turning_points.append(point)
return turning_points
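# Illustration (not from the experiment data): the turning points are the
# staircase's local extrema, e.g. find_turning_points([1, 3, 2, 4]) -> [3, 2].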
def find_threshold(log_1, log_2):
average_1 = 0
average_2 = 0
turning_points_1 = find_turning_points(log_1)
if turning_points_1:
average_1 = sum(turning_points_1)/len(turning_points_1)
else:
average_1 = 0
turning_points_2 = find_turning_points(log_2)
if turning_points_2:
average_2 = sum(turning_points_2)/len(turning_points_2)
else:
average_2 = 0
return (average_1 + average_2)/2
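# The contrast threshold is estimated in the usual staircase fashion: average
# the reversal (turning-point) contrasts within each staircase, then average
# the two staircases.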
def load_subject_data(subject_number):
filename = "../subject logs/subject {}.csv".format(subject_number)
with open(filename, 'r') as f:
reader = csv.reader(f)
data = list(reader)[1:]
transparency_log_1 = [float(trial[1]) for trial in data]
transparency_log_2 = [float(trial[3]) for trial in data]
transparency_log_3 = [float(trial[5]) for trial in data]
transparency_log_4 = [float(trial[7]) for trial in data]
return (transparency_log_1, transparency_log_2, transparency_log_3,
transparency_log_4)
def check_subject_validity(subject_number):
with open('../subject logs/catch trials.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[subject_number]
catch_trials_valid = int(subject_info[1]) > 29 and int(subject_info[2]) < 5
with open('../memory_results_after.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[subject_number]
remembered_names_correctly = subject_info[2] == subject_info[3] and \
subject_info[4] == subject_info[5] and \
subject_info[6] == '' and \
subject_info[7] == ''
return catch_trials_valid and remembered_names_correctly
def graph_subject(subject_number):
try:
os.mkdir("Subject {}".format(subject_number))
except FileExistsError:
shutil.rmtree("Subject {}".format(subject_number))
os.mkdir("Subject {}".format(subject_number))
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(subject_number)
individual_graph(transparency_log_1, transparency_log_2, "Easy",
subject_number)
average_easy = find_threshold(transparency_log_1,
transparency_log_2)
print("Subject average for easy condition is {}".format(average_easy))
individual_graph(transparency_log_3, transparency_log_4, "Hard",
subject_number)
average_hard = find_threshold(transparency_log_3,
transparency_log_4)
print("Subject average for hard condition is {}".format(average_hard))
valid = check_subject_validity(int(subject_number))
if valid:
print("Subject is valid")
else:
print("Subject is invalid")
def show_summary_graph():
number_of_subjects = len(os.listdir("../subject logs")) - 1
subject_data = []
for i in range(1, number_of_subjects + 1):
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
easy_threshold = find_threshold(transparency_log_1, transparency_log_2)
hard_threshold = find_threshold(transparency_log_3, transparency_log_4)
subject_data.append((i, easy_threshold, "Easy",
check_subject_validity(i)))
subject_data.append((i, hard_threshold, "Hard",
check_subject_validity(i)))
df = pd.DataFrame(subject_data, columns=["Subject", "Threshold",
"Condition", "Valid"])
print(df)
plot = sns.factorplot(data=df,
x="Subject",
y="Threshold",
hue="Condition",
linestyles=[" ", " "],
legend=False,
size=8,
aspect=2)
plt.legend(loc='upper left')
plot.set(xlabel="Subject Number",
ylabel="Contrast",
title="Summary of all subjects")
plt.show()
plot.savefig("Summary.png")
def generate_results_file():
number_of_subjects = len(os.listdir("../subject logs")) - 1
table = []
for i in range(1, number_of_subjects + 1):
transparency_log_1, transparency_log_2, transparency_log_3, \
transparency_log_4 = load_subject_data(i)
easy_threshold = find_threshold(transparency_log_1, transparency_log_2)
hard_threshold = find_threshold(transparency_log_3, transparency_log_4)
valid = check_subject_validity(i)
with open('../memory_results_after.csv', 'r') as f:
reader = csv.reader(f)
data = list(reader)
subject_info = data[i]
round_number = subject_info[1]
table.append([i, round_number, easy_threshold, hard_threshold, valid])
table = pd.DataFrame(table, columns=["Subject", "Round number",
"Easy Threshold", "Hard Threshold",
"Valid"])
table.to_csv("Summary.csv")
print("Results file can be found in Summary.csv")
def main():
plt.rcParams['figure.figsize'] = (18, 8)
parser = argparse.ArgumentParser()
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument("-i", "--individual",
help="Analyze a particular subject",
action="store", metavar='')
group.add_argument("-s", "--summary", help="See a summary graph",
action="store_true")
group.add_argument("-r", "--result", help="Create a results file",
action="store_true")
args = parser.parse_args()
if args.individual:
if args.individual == 'a':
generate_all_graphs()
else:
graph_subject(args.individual)
if args.summary:
show_summary_graph()
if args.result:
generate_results_file()
if __name__ == '__main__':
main()
| mit |
wdurhamh/statsmodels | statsmodels/sandbox/examples/thirdparty/findow_0.py | 33 | 2147 | # -*- coding: utf-8 -*-
"""A quick look at volatility of stock returns for 2009
Just an exercise to find my way around the pandas methods.
Shows the daily rate of return, the square of it (volatility) and
a 5 day moving average of the volatility.
No guarantee for correctness.
Assumes no missing values.
colors of lines in graphs are not great
uses DataFrame and WidePanel to hold data downloaded from yahoo using matplotlib.
I haven't figured out storage, so the download happens at each run
of the script.
getquotes is from pandas\examples\finance.py
Created on Sat Jan 30 16:30:18 2010
Author: josef-pktd
"""
from statsmodels.compat.python import lzip
import numpy as np
import matplotlib.finance as fin
import matplotlib.pyplot as plt
import datetime as dt
import pandas as pa
def getquotes(symbol, start, end):
quotes = fin.quotes_historical_yahoo(symbol, start, end)
dates, open, close, high, low, volume = lzip(*quotes)
data = {
'open' : open,
'close' : close,
'high' : high,
'low' : low,
'volume' : volume
}
dates = pa.Index([dt.datetime.fromordinal(int(d)) for d in dates])
return pa.DataFrame(data, index=dates)
start_date = dt.datetime(2009, 1, 1)
end_date = dt.datetime(2010, 1, 1)
mysym = ['msft', 'ibm', 'goog']
indexsym = ['gspc', 'dji']
# download data
dmall = {}
for sy in mysym:
dmall[sy] = getquotes(sy, start_date, end_date)
# combine into WidePanel
pawp = pa.WidePanel.fromDict(dmall)
print(pawp.values.shape)
# select closing prices
paclose = pawp.getMinorXS('close')
# take log and first difference over time
paclose_ratereturn = paclose.apply(np.log).diff()
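# log rate of return: r_t = ln(P_t) - ln(P_{t-1}), the first difference of log prices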
plt.figure()
paclose_ratereturn.plot()
plt.title('daily rate of return')
# square the returns
paclose_ratereturn_vol = paclose_ratereturn.apply(lambda x:np.power(x,2))
plt.figure()
plt.title('volatility (with 5 day moving average)')
paclose_ratereturn_vol.plot()
# use convolution to get moving average
paclose_ratereturn_vol_mov = paclose_ratereturn_vol.apply(
lambda x:np.convolve(x,np.ones(5)/5.,'same'))
paclose_ratereturn_vol_mov.plot()
#plt.show()
| bsd-3-clause |
synthicity/pandana | pandana/loaders/pandash5.py | 5 | 2024 | import pandas as pd
def remove_nodes(network, rm_nodes):
"""
Create DataFrames of nodes and edges that do not include specified nodes.
Parameters
----------
network : pandana.Network
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
Returns
-------
nodes, edges : pandas.DataFrame
"""
rm_nodes = set(rm_nodes)
ndf = network.nodes_df
edf = network.edges_df
nodes_to_keep = ~ndf.index.isin(rm_nodes)
edges_to_keep = ~(edf['from'].isin(rm_nodes) | edf['to'].isin(rm_nodes))
return ndf.loc[nodes_to_keep], edf.loc[edges_to_keep]
def network_to_pandas_hdf5(network, filename, rm_nodes=None):
"""
Save a Network's data to a Pandas HDFStore.
Parameters
----------
network : pandana.Network
filename : str
rm_nodes : array_like
A list, array, Index, or Series of node IDs that should *not*
be saved as part of the Network.
"""
if rm_nodes is not None:
nodes, edges = remove_nodes(network, rm_nodes)
else:
nodes, edges = network.nodes_df, network.edges_df
with pd.HDFStore(filename, mode='w') as store:
store['nodes'] = nodes
store['edges'] = edges
store['two_way'] = pd.Series([network._twoway])
store['impedance_names'] = pd.Series(network.impedance_names)
def network_from_pandas_hdf5(cls, filename):
"""
Build a Network from data in a Pandas HDFStore.
Parameters
----------
cls : class
Class to instantiate, usually pandana.Network.
filename : str
Returns
-------
network : pandana.Network
"""
with pd.HDFStore(filename) as store:
nodes = store['nodes']
edges = store['edges']
two_way = store['two_way'][0]
imp_names = store['impedance_names'].tolist()
return cls(
nodes['x'], nodes['y'], edges['from'], edges['to'], edges[imp_names],
twoway=two_way)
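# Hedged usage sketch (commented out; `net` stands for some pandana.Network
# built elsewhere, and the file name is illustrative):
# network_to_pandas_hdf5(net, 'net.h5', rm_nodes=[10, 42])
# net2 = network_from_pandas_hdf5(pandana.Network, 'net.h5')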
| agpl-3.0 |
inkenbrandt/loggerloader | loggerloader/llgui.py | 1 | 107310 | "import matplotlib\n\nmatplotlib.use(\"TkAgg\")\nfrom matplotlib.backends.backend_tkagg import Navig(...TRUNCATED) | mit |
sbg2133/miscellaneous_projects | carina/lic_thesis_highSL.py | 1 | 6145 | "from getIQU import IQU\nfrom subprocess import call\nimport sys,os\nimport numpy as np\nimport glob(...TRUNCATED) | gpl-3.0 |
466152112/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | "# Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>\n#\n# License: BSD 3 clause\n\nimpo(...TRUNCATED) | bsd-3-clause |