repo_name (string, 6-92 chars) | path (string, 4-191 chars) | copies (322 classes) | size (string, 4-6 chars) | content (string, 821-753k chars) | license (15 classes) |
---|---|---|---|---|---|
scikit-learn-contrib/py-earth | examples/plot_feature_importance.py | 3 | 2142 | """
===========================
Plotting feature importance
===========================
A simple example showing how to compute and display
feature importances; the results are also compared with the
feature importances obtained using random forests.
Feature importance is a measure of the effect of the features
on the outputs. For each feature, the values range from
0 to 1, where a higher value means that the feature has
a stronger effect on the outputs.
Currently three criteria are supported: 'gcv', 'rss' and 'nb_subsets'.
See [1], section 12.3 for more information about the criteria.
.. [1] http://www.milbo.org/doc/earth-notes.pdf
"""
import numpy
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestRegressor
from pyearth import Earth
# Create some fake data
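# This is essentially the Friedman #1 benchmark (with uniform rather than Gaussian noise): only x0..x4
# influence y, while x5..x9 are pure noise, so a good importance measure should rank the first five features highest.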
numpy.random.seed(2)
m = 10000
n = 10
X = numpy.random.uniform(size=(m, n))
y = (10 * numpy.sin(numpy.pi * X[:, 0] * X[:, 1]) +
20 * (X[:, 2] - 0.5) ** 2 +
10 * X[:, 3] +
5 * X[:, 4] + numpy.random.uniform(size=m))
# Fit an Earth model
criteria = ('rss', 'gcv', 'nb_subsets')
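# Each criterion ranks features differently: 'rss' by the reduction in residual sum of squares attributed to
# a feature, 'gcv' by the reduction in the GCV score, and 'nb_subsets' by the number of term subsets that
# include the feature (see the reference above for details).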
model = Earth(max_degree=3,
max_terms=10,
minspan_alpha=.5,
feature_importance_type=criteria,
verbose=True)
model.fit(X, y)
rf = RandomForestRegressor()
rf.fit(X, y)
# Print the model
print(model.trace())
print(model.summary())
print(model.summary_feature_importances(sort_by='gcv'))
# Plot the feature importances
importances = model.feature_importances_
importances['random_forest'] = rf.feature_importances_
criteria = criteria + ('random_forest',)
idx = 1
fig = plt.figure(figsize=(20, 10))
labels = ['$x_{}$'.format(i) for i in range(n)]
for crit in criteria:
plt.subplot(2, 2, idx)
plt.bar(numpy.arange(len(labels)),
importances[crit],
align='center',
color='red')
plt.xticks(numpy.arange(len(labels)), labels)
plt.title(crit)
plt.ylabel('importances')
idx += 1
title = '$x_0,...,x_9 \\sim \\mathrm{Unif}(0, 1)$\n$y = 10 \\sin(\\pi x_{0}x_{1}) + 20(x_2 - 0.5)^2 + 10x_3 + 5x_4 + \\mathrm{Unif}(0, 1)$'
fig.suptitle(title, fontsize="x-large")
plt.show()
| bsd-3-clause |
alphatwirl/alphatwirl | alphatwirl/collector/ToDataFrameWithDatasetColumn.py | 1 | 1326 | # Tai Sakuma <tai.sakuma@gmail.com>
import pandas as pd
from .ToTupleListWithDatasetColumn import ToTupleListWithDatasetColumn
##__________________________________________________________________||
class ToDataFrameWithDatasetColumn:
def __init__(self, summaryColumnNames,
datasetColumnName = 'component'
):
self.summaryColumnNames = summaryColumnNames
self.datasetColumnName = datasetColumnName
self.to_tuple_list = ToTupleListWithDatasetColumn(
summaryColumnNames = summaryColumnNames,
datasetColumnName = datasetColumnName)
def __repr__(self):
name_value_pairs = (
('summaryColumnNames', self.summaryColumnNames),
('datasetColumnName', self.datasetColumnName),
)
return '{}({})'.format(
self.__class__.__name__,
', '.join(['{} = {!r}'.format(n, v) for n, v in name_value_pairs]),
)
def combine(self, dataset_readers_list):
tuple_list = self.to_tuple_list.combine(dataset_readers_list)
if tuple_list is None:
return None
header = tuple_list[0]
contents = tuple_list[1:]
return pd.DataFrame(contents, columns = header)
##__________________________________________________________________||
| bsd-3-clause |
FilipDominec/python-meep-utils | scripts_postpro/plot_TY.py | 1 | 4307 | #!/usr/bin/env python
#-*- coding: utf-8 -*-
simtime = 80e-12
size_y = 1400e-6
c = 3e8
maxfreq = 2e12
## Import common moduli
import numpy as np
from scipy.constants import c, hbar, pi
import matplotlib, sys, os, time
import matplotlib.pyplot as plt
## Start figure + subplot (interactive)
fig = plt.figure(figsize=(10,10))
ax = plt.subplot(111, axisbg='w')
fig.subplots_adjust(left=.05, bottom=.05, right=.99, top=.99, wspace=.05, hspace=.05)
## Decide the filename to load data
import sys
filename = sys.argv[1] if len(sys.argv)>1 else 'input.dat'
if not os.path.isfile(filename): raise IOError, 'File %s can not be opened!' % filename
## Load n-dimensional arrays from a HDF5 file
import h5py
h5file = h5py.File(filename, "r")
print "Found datasets:", h5file.keys()
time1 = time.time()
data = np.array(h5file['ex.r']) * (1+0j)
data += np.array(h5file['ex.i']) * 1j
print "Loaded dataset with shape:", data.shape, 'in %04d s.' % (time.time()-time1)
try:
Et = data[:,-1,:] ## take the farthest slice by the z-axis
except IndexError:
Et = data ## if data already 2D
t = np.linspace(0, simtime, Et.shape[1]) ## define the dimension of data axes
y = np.linspace(0, size_y, Et.shape[0])
## Export n-dimensional arrays to a HDF5 file
## Fourier transform
freq = np.fft.fftfreq(len(t), d=(t[1]-t[0])) # calculate the frequency axis with proper spacing
Efy = np.fft.fft(Et, axis=1) / len(t) * 2*np.pi # calculate the FFT values
#def ffts(arr):
#return np.hstack([arr[len(arr)/2+1:], arr[:len(arr)/2]])
def ffts2(arr):
return np.vstack([arr[len(arr)/2:,:], arr[:len(arr)/2,:]])
#freq = ffts(freq)
#Efy = ffts2(Efy)
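# np.fft.fftshift reorders the transform so that the zero-frequency component is centred; the ffts2 helper
# above performs the equivalent reordering along the first axis and is used below for the spatial transform.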
freq = np.fft.fftshift(freq) #+ freq[len(freq)/2]
Efy = np.fft.fftshift(Efy)
kT = np.fft.fftfreq(len(y), d=(y[1]-y[0])) # calculate the frequency axis with proper spacing
Ef = np.fft.fft(Efy, axis=0) / len(y) * 2*np.pi # calculate the FFT values
kT = np.fft.fftshift(kT)
#Ef = np.fft.fftshift(Ef)
print Ef.shape
Ef = ffts2(Ef)
print Ef.shape
truncated = np.logical_and(freq>0, freq<maxfreq) # (optional) get the frequency range
freq = freq[truncated]
Ef = Ef[:,truncated]
print 'freq', freq.shape, freq[::10]
print 'kT', kT.shape, kT[::10]
## plot contours for gridded data
#contours = plt.contourf(t, y, np.log10(np.abs(et)+1e-6), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#contours = plt.contourf(t, y, et, cmap=matplotlib.cm.rdbu, extend='both') # levels=np.arange(0.,1,.01),
toplot = (np.abs(Et))
contours = plt.contourf(t, y, toplot, cmap=matplotlib.cm.gist_earth, levels=np.linspace(np.min(toplot)*0+np.max(toplot)*0,np.max(toplot),200) ,extend='both') #
#contours = plt.contourf(freq, kT, np.abs(Ef), cmap=matplotlib.cm.gist_earth, extend='both') # levels=np.arange(0.,1,.01),
#plt.plot([0, maxfreq], [0, 0], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, maxfreq/c], c='w',lw=.5)
#plt.plot([0, maxfreq], [0, -maxfreq/c], c='w',lw=.5)
#plt.annotate('+45$^\\circ$', xy = (maxfreq/2, maxfreq/2/c), xytext = (-10, 10), textcoords='offset points',color='w')
#plt.annotate('-45$^\\circ$', xy = (maxfreq/2, -maxfreq/2/c), xytext = (10, 10), textcoords='offset points',color='w')
#
try:
## Load 1D curve
filename = "effparam.dat"
(x, y) = np.loadtxt(filename, usecols=(0,5), unpack=True)
truncated = np.logical_and(x>0, x<maxfreq) # (optional) get the frequency range
x = x[truncated]
y = y[truncated]
## Plot line
plt.plot(x, np.real(y)*1000, color="#FF8800", label=u"$y'$", ls='-', c='w',lw=1)
except:
print "refractive index could not be loaded"
for contour in contours.collections: contour.set_antialiased(False) ## optional: avoid white aliasing (for matplotlib 1.0.1 and older)
plt.colorbar() ## optional: colorbar
## Finish the plot + save
#plt.ylim((-2e4,2e4))
plt.xlabel(u"time");
plt.ylabel(u"y");
plt.grid()
plt.legend(prop={'size':10}, loc='upper right')
plt.savefig("output_T-Y.png", bbox_inches='tight')
| gpl-2.0 |
cgalleguillosm/accasim | accasim/utils/plot_factory.py | 1 | 51706 | """
MIT License
Copyright (c) 2017 cgalleguillosm, AlessioNetti
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import matplotlib.pyplot as plt
import matplotlib.patches as patches
from matplotlib.backends.backend_pdf import PdfPages
from math import floor
from accasim.utils.reader_class import DefaultReader
from accasim.utils.misc import load_config, from_isodatetime_2_timestamp as timestamp_func, str_resources
from accasim.utils.file import path_leaf, load_jsonfile
from accasim.base.resource_manager_class import Resources
from accasim.experimentation.schedule_parser import define_result_parser
from accasim.utils.misc import DEFAULT_SIMULATION
from copy import deepcopy
from os.path import splitext, join
from scipy.signal import savgol_filter
from os.path import isfile
import numpy as np
from matplotlib.pyplot import boxplot
class PlotFactory:
"""
A class for plot production and schedule files pre-processing.
In this class, some basic algorithms are implemented for pre-processing the schedule files produced through
simulation, and for producing some common evaluation plots.
"""
SCHEDULE_CLASS = 'schedule'
BENCHMARK_CLASS = 'benchmark'
SLOWDOWN_PLOT = 'slowdown'
QUEUE_SIZE_PLOT = 'queue_size'
LOAD_RATIO_PLOT = 'load_ratio'
EFFICIENCY_PLOT = 'efficiency'
SCALABILITY_PLOT = 'scalability'
SIMULATION_TIME_PLOT = 'sim_time'
SIMULAION_MEMORY_PLOT = 'sim_memory'
PLOT_TYPES = {
SCHEDULE_CLASS: [SLOWDOWN_PLOT, QUEUE_SIZE_PLOT, LOAD_RATIO_PLOT, EFFICIENCY_PLOT],
BENCHMARK_CLASS: [SCALABILITY_PLOT, SIMULATION_TIME_PLOT, SIMULAION_MEMORY_PLOT]
}
def __init__(self, plot_class, sim_params_fname=None, config=None, resource=None, workload_parser=None, debug=False):
"""
The constructor for the class.
:param plot_class: the plot_class of files to be analyzed. Can be either 'schedule', if schedule files are going to be
analyzed, or 'benchmark' if resource usage log files will be analyzed;
:param sim_params_fname: the path of the simulator parameters file, or the per-experiment filename token used to locate it;
:param config: The path to a system configuration file. Needed for the schedule meta-simulation;
:param resource: a resource type in the system to be considered. If specified, all resource-related statistics
will be computed in regards to this resource alone;
:param workload_parser: an optional custom parser used when reading the schedule files;
:param debug: Debug flag.
"""
self._debug = debug
if not (plot_class in self.PLOT_TYPES.keys()):
if self._debug:
print('Invalid plot_class chosen. Selecting the schedule plot_class by default...')
plot_class = self.SCHEDULE_CLASS
self._plot_class = plot_class
self._sim_params_fname = sim_params_fname # if sim_params_fname is not None and isfile(sim_params_fname) else None
self._config = config
self._resource = resource
self._workload_parser = workload_parser
self._preprocessed = False
self._filepaths = []
self._labels = []
self._slowdowns = []
self._queuesizes = []
self._loadratiosX = []
self._loadratiosY = []
self._efficiencies = []
self._simdata = []
self._schedtimes = []
self._mantimes = []
self._simmemory = []
self._scalabilitydataX = []
self._scalabilitydataY = []
self._resource_order = None
if self._sim_params_fname is None:
self._resource_order = DEFAULT_SIMULATION['RESOURCE_ORDER']
# Base resource availability per-node (never changes)
self._base_res = {}
# Current resource availability per-node
self._sys_res = {}
# Aggregated used resources for all nodes
self._used_res_sum = {}
# Aggregate base resource availability for used nodes only
self._avl_res_sum = {}
# Aggregated base resource availability for all nodes
self._base_res_sum = {}
# Amount of currently used nodes
self._used_nodes = 0
# Number of total nodes in the system
self._total_nodes = 0
def set_files(self, paths, labels):
"""
Set the paths and labels of the files to be analyzed.
:param paths: A list of filepaths related to the files to be analyzed;
:param labels: the labels associated with each file, used in the plots; must have the same length as paths;
"""
self._preprocessed = False
if not isinstance(paths, (list, tuple)):
self._filepaths = [paths]
self._labels = [labels]
else:
self._filepaths = paths
self._labels = labels
if len(self._filepaths) != len(self._labels):
if self._debug:
print("Filepaths and Labels lists must have the same lengths.")
self._labels = []
self._filepaths = []
def pre_process(self, trimSlowdown=True, trimQueueSize=False):
"""
Performs pre-processing on all specified files, according to their type.
If the files are of the schedule type, a meta-simulation is run for each of them, computing data like slowdown,
queue size, load ratios and such. If the data is of the benchmark type, the files are simply parsed and their
information stored.
:param trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True
:param trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False
"""
if not self._preprocessed:
# Perform pre-processing for schedule files
if self._plot_class == self.SCHEDULE_CLASS:
self._slowdowns = []
self._queuesizes = []
self._loadratiosX = []
self._loadratiosY = []
self._efficiencies = []
self._preprocessed = True
for f in self._filepaths:
# If an error is encountered on one of the files, the process is aborted
if not self._getScheduleData(f, self._config, self._resource, trimSlowdown, trimQueueSize):
self._preprocessed = False
break
# Perform pre-processing for benchmark files
elif self._plot_class == self.BENCHMARK_CLASS:
self._simdata = []
self._schedtimes = []
self._mantimes = []
self._simmemory = []
self._scalabilitydataX = []
self._scalabilitydataY = []
self._preprocessed = True
for f in self._filepaths:
if not self._getBenchmarkData(f):
self._preprocessed = False
break
if not self._preprocessed:
print("Could not process files, please ensure they are in the correct path and format.")
return self._preprocessed
def produce_plot(self, type, title='', scale='linear', xlim=(None, None), ylim=(None, None), legend=True, figsize=(7, 5), meansonly=False, alpha=0.005, smooth=30, output='Output.pdf', groups=1, **kwargs):
"""
Produces a single plot on the pre-processed files.
The user can produce plots among the available types. These are:
- slowdown: a box-plot distribution plot for slowdown values across test instances
- queue_size: a box-plot for queue size in the simulation across test instances
- load_ratio: a distribution scatter plot for the load ratio as a function of the fraction of used nodes, for
each test instance separately;
- efficiency: a box-plot for resource allocation efficiency across test instances
- scalability: a scalability plot for dispatching methods across test instances
- sim_time: a bar plot for the simulation timings across test instances
- sim_memory: a bar plot for memory usage across test instances
:param type: the type of the plot, must be one of the above;
:param title: the title of the plot;
:param scale: the scale of the plot (see matplotlib documentation);
:param xlim: the left-right bounds for axis scaling, is a tuple;
:param ylim: the bottom-top bounds for axis scaling, is a tuple;
:param legend: activates the legend, is a boolean;
:param figsize: the size of the figure, is a tuple;
:param meansonly: triggers the plot of mean values alone in box-plots, is a boolean;
:param alpha: the alpha of certain features in plots, in particular for distribution scatter plots;
:param smooth: smoothing factor used for the Savitzky-Golay filter in the scalability plot. The lower the number,
the higher the smoothing;
:param output: path of the output PDF file;
"""
if not self._preprocessed:
self.pre_process()
print("Plot_factory: Files were not pre-processed yet. Calling the pre_process method.")
if type == self.SLOWDOWN_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._slowdowns, title=title, ylabel='Slowdown', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.QUEUE_SIZE_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._queuesizes, title=title, ylabel='Queue size', scale=scale, xlim=xlim, ylim=(0, None), figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.LOAD_RATIO_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.distribution_scatter_plot(self._loadratiosX, self._loadratiosY, title=title, scale=scale, xlim=(-0.01, 1.01), ylim=(-0.01, 1.01), figsize=figsize, alpha=alpha, output=output, **kwargs)
elif type == self.EFFICIENCY_PLOT and self._plot_class == self.SCHEDULE_CLASS:
self.box_plot(self._efficiencies, title=title, ylabel='Resource efficiency', scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, meansonly=meansonly, output=output, groups=groups, **kwargs)
elif type == self.SCALABILITY_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.scalability_plot(self._scalabilitydataX, self._scalabilitydataY, title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, smooth=smooth, output=output, **kwargs)
elif type == self.SIMULATION_TIME_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.box_plot_times(self._mantimes, self._schedtimes, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs)
elif type == self.SIMULAION_MEMORY_PLOT and self._plot_class == self.BENCHMARK_CLASS:
self.box_plot_memory(self._simmemory, title=title, scale=scale, xlim=xlim, ylim=ylim, figsize=figsize, legend=legend, output=output, **kwargs)
else:
raise Exception("Plot type specified is not valid. Review the documentation for valid plot types.")
def _getBenchmarkData(self, filepath):
"""
Pre-processes a resource usage log file.
:param filepath: the path to the log file;
:return: True if successful, False otherwise;
"""
if self._debug:
print("- Pre-processing file " + filepath + "...")
# Tries to read from the file, aborts if an error is encountered
try:
f = open(filepath)
mantimes = []
schedtimes = []
mems = []
simtime = 0
disptime = 0
maxqueuesize = 0
for line in f:
# Each line is parsed and values are extracted from it
attrs = line.split(';')
mantimes.append(float(attrs[4]))
schedtimes.append((int(attrs[1]), float(attrs[3])))
mems.append(float(attrs[5]))
simtime += float(attrs[2])
disptime += float(attrs[3])
if int(attrs[1]) > maxqueuesize:
maxqueuesize = int(attrs[1])
f.close()
except Exception as e:
raise Exception("Error encountered while pre-processing: " + str(e))
# Certain statistics are computed from the data
data = {}
data['avgman'] = np.average(np.array(mantimes))
data['avgsched'] = np.average(np.array([el[1] for el in schedtimes]))
data['simtime'] = simtime / 1000.0
data['schedtime'] = disptime / 1000.0
data['mantime'] = data['simtime'] - data['schedtime']
data['avgmem'] = np.average(np.array(mems))
data['maxmem'] = np.max(np.array(mems))
# The scalability data is computed through binning: we want to obtain an X, Y set, where in X are the distinct
# queue sizes, and in Y are the average times in ms to perform dispatching on such queue sizes
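# For example, with maxqueuesize = 100 and binningfactor = 1, queue sizes are mapped onto 100 evenly spaced
# bins and each dispatching time is averaged within the bin its queue size falls into.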
binningfactor = 1
bins = int(floor(maxqueuesize / binningfactor))
queuevalues = np.linspace(0, maxqueuesize, bins)
mappinglist = []
for i in range(bins):
mappinglist.append([])
step = (maxqueuesize) / (bins - 1)
for qsize, stime in schedtimes:
index = int(floor(qsize / step))
mappinglist[index].append(stime)
finallist = []
finalqueuevalues = []
for i in range(len(mappinglist)):
l = mappinglist[i]
if len(l) > 0:
finallist.append(sum(l) / len(l))
finalqueuevalues.append(queuevalues[i])
self._mantimes.append(mantimes)
self._schedtimes.append([el[1] for el in schedtimes])
self._simmemory.append(mems)
self._simdata.append(data)
self._scalabilitydataX.append(finalqueuevalues)
self._scalabilitydataY.append(finallist)
return True
def _getScheduleData(self, filepath, config, resource=None, trimSlowdown=True, trimQueueSize=False):
"""
Performs pre-processing on a schedule file through a meta-simulation process.
:param filepath: The path of the file to be analyzed;
:param config: The path to the system configuration file;
:param resource: A resource to be considered for resource-related metrics; if none is specified, all resource
types are used;
:param trimSlowdown: boolean flag. If True, slowdown values equal to 1 will be discarded. Default is True
:param trimQueueSize: boolean flag. If True, queue size values equal to 0 will be discarded. Default is False
:return: True if successful, False otherwise;
"""
if self._debug:
print("- Pre-processing file " + filepath + "...")
# Generates the dictionary of system resources from the config file
resobject, equiv = self._generateSystemConfig(config)
self._base_res = resobject.availability()
res_types = resobject._system_resource_types
# Makes sure the resource type exists in the system
if resource is not None and resource not in resobject._system_resource_types:
if self._debug:
print("Resource type " + resource + "is not valid. Using all available resources...")
resource = None
# Tries to read from the log file, aborts if an error is encountered
try:
_sim_params_path = None
# If the simulator config path points to a file, it is considered as is
if self._sim_params_fname is not None and isfile(self._sim_params_fname):
_sim_params_path = self._sim_params_fname
# If it is a plain string, it is used as a token for config files in the experimentation
elif self._sim_params_fname is not None:
_path, _filename = path_leaf(filepath)
_sim_params_path = join(_path, self._sim_params_fname)
# If it is none, the default_result_parser will use the DEFAULT_SIMULATION config
if _sim_params_path is not None:
_resource_order = load_jsonfile(_sim_params_path)['RESOURCE_ORDER']
else:
_resource_order = self._resource_order
if self._workload_parser is not None:
reader = DefaultReader(filepath, parser=self._workload_parser, equivalence=equiv)
else:
reader = DefaultReader(filepath, parser=define_result_parser(_sim_params_path), equivalence=equiv)
slowdowns = []
timePoints = set()
jobs = {}
rev_timePoints = {}
if self._debug:
print("Loading jobs...")
while True:
# Jobs are read and their slowdown values are stored
job = reader._read()
if job is not None:
job['start_time'] = timestamp_func(job['start_time'])
job['end_time'] = timestamp_func(job['end_time'])
job['queue_time'] = timestamp_func(job['queue_time'])
_start_time = job['start_time']
_end_time = job['end_time']
_queued_time = job['queue_time']
duration = _end_time - _start_time
wait = _start_time - _queued_time
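# Slowdown = (wait + duration) / duration: 1.0 for jobs that start immediately, growing with the time spent
# in the queue relative to the run time; zero-duration jobs fall back to their waiting time (or 1.0 if they did not wait).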
slowdown = (wait + duration) / duration if duration != 0 else wait if wait != 0 else 1.0
if slowdown > 1.0 or not trimSlowdown:
slowdowns.append(slowdown)
job_id = job['job_id']
jobs[job_id] = job
# Timepoints for use in the simulation are stored
timePoints.add(_queued_time)
self._addToDictAsList(rev_timePoints, _queued_time, job_id, 'queue')
timePoints.add(_start_time)
self._addToDictAsList(rev_timePoints, _start_time, job_id, 'start')
if duration > 0:
timePoints.add(_end_time)
self._addToDictAsList(rev_timePoints, _end_time, job_id, 'end')
else:
break
except Exception as e:
raise Exception("Error encountered while pre-processing: " + str(e))
# It may happen that the slowdown list is empty if all jobs have a value equal to 1. In this case we add
# a fake value, equal to 1 as well
if trimSlowdown and len(slowdowns) == 0:
slowdowns.append(1)
if self._debug:
print("Jobs loaded. Sorting...")
# We compute the final set of distinct, ordered timepoints
timePoints = sorted(timePoints)
timePointsIDX = 0
self._sys_res = deepcopy(self._base_res)
self._base_res_sum = {k: sum(self._base_res[n][k] for n in self._base_res) for k in res_types}
self._used_res_sum = {k: 0 for k in res_types}
self._avl_res_sum = {k: 0 for k in res_types}
self._used_nodes = 0
self._total_nodes = len(self._base_res.values())
queue = set()
running = set()
# Pre-allocating the lists to store performance metrics, for efficiency
queued = [0] * len(timePoints) # []
resources = [0] * len(timePoints) # []
run = [0] * len(timePoints) # []
efficiency = [0] * len(timePoints) # []
efficiencyperjob = [0] * len(jobs) # []
efficiencyIDX = 0
if self._debug:
print("Sorting done. Starting simulation...")
# Meta-simulation: goes on until there are no more timepoints to consider
while timePointsIDX < len(timePoints):
point = timePoints[timePointsIDX]
timePointsIDX += 1
# Adds to the queue jobs that were submitted in this timepoint
jobstoqueue = rev_timePoints[point]['queue']
# queue += len(jobstoqueue)
queue.update(jobstoqueue)
# Jobs that have terminated release their resources
jobstoend = rev_timePoints[point]['end']
if len(jobstoend) > 0:
for j_id in jobstoend:
j = jobs[j_id]
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
self._deallocate_resources(req, assignations, resource)
# running -= len(jobstoend)
running = running - jobstoend
# Jobs that have to start take their resources from the system
jobstostart = rev_timePoints[point]['start']
if len(jobstostart) > 0:
for j_id in jobstostart:
j = jobs[j_id]
if j['end_time'] - j['start_time'] > 0:
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
self._allocate_resources(req, assignations, resource)
# running += 1
running.add(j_id)
# queue -= len(jobstostart)
queue = queue - jobstostart
# Additionally, we store for every started job its resource allocation efficiency
for j_id in jobstostart:
j = jobs[j_id]
if j['end_time'] - j['start_time'] > 0:
req, assignations = self._getRequestedResources(_resource_order, j['assignations'])
eff = self._getResourceEfficiency(req, assignations, self._sys_res, resource)
efficiencyperjob[efficiencyIDX] = eff
efficiencyIDX += 1
# System metrics are computed AFTER dispatching
queued[timePointsIDX - 1] = len(queue) # queue
run[timePointsIDX - 1] = len(running) # running
resources[timePointsIDX - 1] = self._getLoadRatio(resource)
efficiency[timePointsIDX - 1] = self._getLoadRatioSelective(resource)
if self._debug:
print("Simulation done!")
if trimQueueSize:
queued = [q for q in queued if q != 0]
run = [r for r in run if r != 0]
# The metrics values for this instance are added to the internal variables
self._slowdowns.append(slowdowns)
self._queuesizes.append(queued)
self._efficiencies.append(efficiencyperjob)
self._loadratiosX.append([el[0] for el in efficiency])
self._loadratiosY.append([el[1] for el in efficiency])
return True
def _addToDictAsList(self, dict, key, el, type):
"""
Simple method that adds an element to a dictionary and creates sub-entries if needed.
:param dict: The target dictionary
:param key: The key of the element to add
:param el: The element to add
:param type: The type of the element to add, used in the sub-dictionary for the key entry
:return: None
"""
if key not in dict:
dict[key] = {'queue': set(), 'start': set(), 'end': set()}
dict[key][type].add(el)
def _allocate_resources(self, req, assignations, resource=None):
"""
Method that allocates the resources for a certain starting job and updates all data structures related to
resource usage
:param req: The resource request of the job
:param assignations: The list of nodes assigned to the job
:param resource: A resource type to be considered for performance metrics (optional)
:return: None
"""
for node in assignations:
# If the node goes from the unused to the used state, we update the number of used nodes and the amount
# of available resources among the used nodes, for the efficiency plots
if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()):
self._used_nodes += 1
for k, v in self._base_res[node].items():
self._avl_res_sum[k] += v
# If a specific resource type is considered, the same condition is triggered only if such resource is used
elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0:
self._used_nodes += 1
self._avl_res_sum[resource] += self._base_res[node][resource]
# Updating the per-node currently available resources
for k, val in req.items():
self._sys_res[node][k] -= val
if self._sys_res[node][k] < 0:
self._sys_res[node][k] = 0
if self._debug:
print("Caution: resource " + k + " is going below zero.")
# Updating the dictionary of per-type currently used resources
for k, v in req.items():
self._used_res_sum[k] += v * len(assignations)
if self._used_res_sum[k] > self._avl_res_sum[k]:
self._used_res_sum[k] = self._avl_res_sum[k]
def _deallocate_resources(self, req, assignations, resource):
"""
Method that de-allocates the resources for a certain starting job and updates all data structures related to
resource usage
:param req: The resource request of the job
:param assignations: The list of nodes assigned to the job
:param resource: A resource type to be considered for performance metrics (optional)
:return: None
"""
for node in assignations:
for k, val in req.items():
self._sys_res[node][k] += val
if self._sys_res[node][k] > self._base_res[node][k]:
self._sys_res[node][k] = self._base_res[node][k]
if self._debug:
print("Caution: resource " + k + " is going beyond its base capacity.")
# In this case the check for used-unused nodes must be performed after the resources are de-allocated
if resource is None and all(self._sys_res[node][k] == self._base_res[node][k] for k in self._base_res[node].keys()):
self._used_nodes -= 1
for k, v in self._base_res[node].items():
self._avl_res_sum[k] -= v
elif resource is not None and self._sys_res[node][resource] == self._base_res[node][resource] and req[resource] > 0:
self._used_nodes -= 1
self._avl_res_sum[resource] -= self._base_res[node][resource]
# The method is specular to allocate_resources and works identically
for k, v in req.items():
self._used_res_sum[k] -= v * len(assignations)
if self._used_res_sum[k] < 0:
self._used_res_sum[k] = 0
def _generateSystemConfig(self, config_path):
"""
Generates a Resources object from a system configuration file.
:param config_path: the path to the config file;
:return: the Resources object and the resource equivalence;
"""
try:
config = load_config(config_path)
equiv = config.pop('equivalence', {})
# PEP 448 - Additional Unpacking Generalizations
# python 3.5 and newer
if 'node_prefix' not in config:
config['node_prefix'] = ''
resources = Resources(**config)
return resources, equiv
except Exception as e:
if config_path != '':
print("Could not load system config: " + str(e))
else:
print("A system configuration file must be specified.")
exit()
return None, None
def _getRequestedResources(self, _resource_order, assignations_str):
"""
Returns the requested resources and the node assignations encoded in a job's assignation string.
:param _resource_order: the ordered list of resource type names used to decode each assignation;
:param assignations_str: the serialized assignation string of the job;
:return: the dictionary of resources needed by each job unit, and the list of node assignations;
"""
_assignations_list = assignations_str.split(str_resources.SEPARATOR)[0:-1]
_nodes_list = [assign.split(';')[0] for assign in _assignations_list]
_request = { k:int(v) for k, v in zip(_resource_order, _assignations_list[0].split(';')[1:])}
return _request, _nodes_list
def _getResourceEfficiency(self, reqres, nodes, sys_res, resource):
"""
Computes the resource allocation efficiency metric for a certain input job.
This method computes the resource allocation efficiency AFTER dispatching is performed, not before.
:param reqres: the dictionary of resources requested by each job unit;
:param nodes: the list of node assignations;
:param sys_res: the dictionary of system resources;
:param resource: the resource type to be considered (if present);
:return: the resource allocation efficiency;
"""
# Computing the amount of used resources by the job
if resource is None:
used = sum(r * len(nodes) for r in reqres.values())
else:
used = reqres[resource] * len(nodes)
avl = 0
# Computing the amount of available resources in nodes used by the job
for node in set(nodes):
if resource is None:
avl += sum(r for r in sys_res[node].values())
else:
avl += sys_res[node][resource]
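# sys_res reflects availability AFTER the job was dispatched, so avl is what remains free on the job's nodes;
# used / (avl + used) is therefore the job's share of the resources on those nodes not held by other jobs.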
return used / (avl + used)
def _getLoadRatio(self, resource):
"""
Returns the standard load ratio for the system.
:param resource: the resource type to be considered (if present);
:return: the load ratio;
"""
loadratio = 0
if resource is None:
loadratio = sum(self._used_res_sum.values()) / sum(self._base_res_sum.values())
elif resource in self._base_res_sum:
loadratio = self._used_res_sum[resource] / self._base_res_sum[resource]
return loadratio
def _getLoadRatioSelective(self, resource):
"""
Returns the per-step resource allocation efficiency.
This is defined as a X,Y pair where X expresses the fraction of used nodes, and Y defines the fraction of used
resources in such nodes.
:param resource: the resource type to be considered (if present);
:return: an X,Y pair expressing the per-step resource allocation efficiency;
"""
loadratio = 0
if self._used_nodes > 0:
if resource is None:
loadratio = sum(self._used_res_sum.values()) / sum(self._avl_res_sum.values())
elif resource in self._avl_res_sum:
loadratio = self._used_res_sum[resource] / self._avl_res_sum[resource]
return self._used_nodes / self._total_nodes, loadratio
else:
return 0, 0
def _getDistributionStats(self, data):
"""
Returns some useful distribution statistics for the input data.
The mean, minimum, maximum, median, and quartiles for the data are computed.
:param data: The iterable for the input data;
:return: a dictionary of statistics for the data distribution;
"""
stats = {}
stats['avg'] = np.average(data)
stats['min'] = np.min(data)
stats['max'] = np.max(data)
stats['median'] = np.median(data)
stats['quartiles'] = np.percentile(data, range(0, 100, 25))
return stats
def box_plot(self, data, title='', ylabel='', scale='linear', figsize=(7, 5), meansonly=False, output='Output.pdf', groups=1, **kwargs):
"""
Produces a box-and-whiskers plot for the input data's distributions.
:param data: the input data; must be a list, in which each element is again a list containing all of the data
regarding a certain test instance; the ordering must be that of the labels;
:param title: the title of the plot;
:param ylabel: the Y-axis label;
:param scale: the scale of the plot;
:param figsize: the size of the figure, is a tuple;
:param meansonly: if True only the mean values for each distribution are depicted;
:param output: the path to the output file;
:param **kwargs:
- fig_format: {
'format': eps or pdf,
'dpi': Int number
}
- xlim: the left-right axis boundaries, is a tuple;
- ylim: the bottom-top axis boundaries, is a tuple;
"""
color_cycler = ['b', 'r', 'y', 'g', 'c', 'm', 'k', 'w']
hatch_cycler = ['/', '\\', '|', '-', '+', 'x', 'o', 'O', '.', '*']
ncycle = 2
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(data)
ylim = kwargs.pop('ylim', None)
xlim = kwargs.pop('xlim', None)
show_legend = kwargs.pop('show_legend', False)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
linecol = 'black'
tricol = 'black'
vertlinecol = 'gray'
fig, ax = plt.subplots(figsize=figsize)
c_group = 0
c = groups
r_hatch = len(hatch_cycler)
color_list = []
hatch_list = []
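# Boxes are styled in blocks of 'groups' consecutive entries: each block shares one colour, the hatch pattern
# advances once per block, and once all hatch patterns are exhausted the hatch string is lengthened (ncycle)
# so that later blocks remain distinguishable.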
for i, d in enumerate(data):
color_list.append(color_cycler[c_group])
hatch_list.append(hatch_cycler[len(hatch_cycler) - r_hatch] * ncycle)
c -= 1
if c == 0:
c_group += 1
c = groups
r_hatch -= 1
if r_hatch == 0:
ncycle += 1
r_hatch = len(hatch_cycler)
bp = ax.boxplot(data, labels=self._labels, patch_artist=True, sym="", whis=[0, 100], showmeans=True, showfliers=False)
for patch, color, hatch in zip(bp['boxes'], color_list, hatch_list):
patch.set_facecolor(color)
patch.set_alpha(0.75)
patch.set_hatch(hatch)
# add some text for labels, title and axes ticks
ax.set_ylabel(ylabel, fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_yscale(scale)
if show_legend:
ax.legend(bp['boxes'], self._labels, bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=len(self._labels) // 2, mode="expand", borderaxespad=0.)
if ylim:
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
if xlim:
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.tight_layout()
plt.grid(linestyle=':', color='gray', zorder=0)
plt.show()
fig_format = kwargs.pop('fig_format', {})
fig.savefig(output, **fig_format)
def box_plot_times(self, dataman, datasched, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'):
"""
Produces a bar plot for the timings in the simulations, across test instances.
The bars will depict the average time required to perform dispatching in each simulation step, and the
time required to perform simulation-related tasks in the simulation.
:param dataman: the data for the time required in each step to perform simulation-related tasks. Is a list,
where each element is again a list containing the data for a certain test instance;
:param datasched: the data for the time required in each step to perform dispatching. Is a list, where
each element is again a list containing the data for a certain test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param output: the path to the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(dataman)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
fig, ax = plt.subplots(figsize=figsize)
for i in range(N):
avgman = np.average(np.array(dataman[i]))
avgsched = np.average(np.array(datasched[i]))
if i == 0:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75)) # , label='Simulation'))
ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Dispatching decision'))
else:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgman, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75))
ax.add_patch(patches.Rectangle((ind[i], avgman), width, avgsched, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75))
ax.scatter(ind[i] + width / 2, avgman + avgsched, marker='_', s=markersize / 4, zorder=0, color='black')
# add some text for labels, title and axes ticks
ax.set_ylabel('Time [ms]', fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_xticks([i + width / 2 for i in ind])
if legend:
ax.legend()
ax.set_xticklabels(self._labels)
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def box_plot_memory(self, data, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, output='Output.pdf'):
"""
Produces a bar plot for the memory usage in the simulations, across test instances.
The bars depict average and maximum memory usage in the simulation.
:param data: the data for memory usage in each simulation step. Is a list, where
each element is again a list containing the data for a certain test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param output: the path to the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
N = len(data)
spacing = 0.2
ind = [i * spacing for i in np.arange(N)]
width = 0.1
markersize = 250
fig, ax = plt.subplots(figsize=figsize)
for i in range(N):
avgmem = np.average(np.array(data[i]))
maxmem = np.max(np.array(data[i]))
if i == 0:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75, label='Avg. Mem'))
ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75, label='Max. Mem'))
else:
ax.add_patch(patches.Rectangle((ind[i], 0), width, avgmem, facecolor='orange', edgecolor='black', hatch='//', alpha=0.75))
ax.add_patch(patches.Rectangle((ind[i], avgmem), width, maxmem - avgmem, facecolor='blue', edgecolor='black', hatch='\\', alpha=0.75))
ax.scatter(ind[i] + width / 2, maxmem, marker='_', s=markersize / 4, zorder=0, color='black')
ax.set_ylabel('Average Memory Usage [MB]', fontsize=fontsize)
ax.set_xlabel('Dispatching method', fontsize=fontsize)
ax.set_title(title)
ax.set_xticks([i + width / 2 for i in ind])
if legend:
ax.legend()
ax.set_xticklabels(self._labels)
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def scalability_plot(self, xdata, ydata, title='', scale='linear', xlim=(None, None), ylim=(None, None), figsize=(7, 5), legend=True, smooth=30, linestyles=None, markers=None, output='Output.pdf'):
"""
Creates a scalability plot for all test instances, where X represents the queue size, and Y the average
time required by each dispatching method in the instances.
:param xdata: the X data, containing the queue sizes for each test instance; is a list, where each element
contains a list with the data for each test instance;
:param ydata: the Y data, containing the average times required to perform dispatching in each test instance;
is a list, where each element contains a list with the data for each test instance;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param legend: enables or disables visualization of the legend;
:param smooth: smoothing factor for the Savitzky-Golay filter. The lower the number, the higher the smoothing;
:param output: the path of the output file;
"""
fontsize = 12
plt.rc('xtick', labelsize=fontsize)
plt.rc('ytick', labelsize=fontsize)
if not linestyles:
linestyles = ('-', '-', '--', '--', '-.', '-.', ':', ':')
if not markers:
markers = (None, 'o', None, '^', None, 's', None, 'p')
numstyles = len(linestyles)
fig, ax = plt.subplots(figsize=figsize)
divideFactor = smooth
for i in range(len(xdata)):
markeroffset = floor(max(xdata[i]) / 20 + i * 2)
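# savgol_filter requires an odd window length larger than its polynomial order (3 here), so the window is
# derived from the smoothing factor, forced to be odd and clamped to at least 5; shorter series are left unfiltered.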
if divideFactor > 1 and len(ydata[i]) >= divideFactor:
win_len = floor(len(ydata[i]) / divideFactor)
win_len += (win_len + 1) % 2
if win_len < 5:
win_len = 5
yfiltered = savgol_filter(ydata[i], win_len, 3)
else:
yfiltered = ydata[i]
ax.plot(xdata[i], yfiltered, label=self._labels[i], linestyle=linestyles[i % numstyles], marker=markers[i % numstyles], markevery=markeroffset, zorder=2 if markers[i % numstyles] is None else 0)
ax.set_ylabel('Time [ms]', fontsize=fontsize)
ax.set_xlabel('Queue size', fontsize=fontsize)
ax.set_title(title)
if legend:
ax.legend()
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
plt.grid(linestyle=':', color='gray', zorder=0)
plt.setp(plt.gca().get_legend().get_texts(), fontsize=fontsize)
plt.show()
ff = PdfPages(output)
ff.savefig(fig)
ff.close()
def distribution_scatter_plot(self, xdata, ydata, title='', scale='linear', xlim=(0, 1.05), ylim=(0, 1.05), figsize=(7, 5), alpha=0.005, output='Output.pdf'):
"""
Creates a distribution scatter plot for the system's resource efficiency.
The X values represent the fraction of used nodes in a certain time step, while the Y values represent the
fraction of used resources in such nodes. Darker areas of the plot represent values with higher frequency.
The method creates one plot per test instance, automatically.
:param xdata:
:param ydata:
:param alpha: the alpha to be used for each dot in the plot;
:param title: the title of the plot;
:param scale: the scale of the plot;
:param xlim: the left-right boundaries for the plot, is a tuple;
:param ylim: the bottom-top boundaries for the plot, is a tuple;
:param figsize: the size of the figure, is a tuple;
:param output: the path to the output files: the label for each test instance will be automatically added
for each file;
"""
for i in range(len(xdata)):
fig, ax = plt.subplots(figsize=figsize)
ax.scatter(xdata[i], ydata[i], color='black', alpha=alpha, s=5)
ax.set_title(title)
ax.set_xlabel('Used Nodes')
ax.set_ylabel('Used Resources')
ax.set_yscale(scale)
ax.set_ylim(top=ylim[1], bottom=ylim[0], emit=True, auto=False)
ax.set_xlim(left=xlim[0], right=xlim[1], emit=True, auto=False)
ax.grid(True)
plt.show()
splitoutput = splitext(output)
ff = PdfPages(splitoutput[0] + '-' + self._labels[i] + '.pdf')
ff.savefig(fig)
ff.close()
def get_preprocessed_benchmark_data(self):
"""
Returns all of the pre-processed benchmark-related data.
A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed.
Also, each element of the tuple is a list, with as many entries as the files that were processed, in the
same order. Each element of these lists contains then the data related to a specific metric, for a specific
test instance. All data is stored in standard Python lists.
:return: a tuple in which every element is a list containing, in each element, a specific kind of data
regarding one of the test instances. The tuple contains, in this order:
- the resource usage statistics' dictionaries;
- the lists of dispatching times for each time step;
- the lists of management times for each time step;
- the lists of memory usage values for each time step;
- the X scalability data containing the queue size for each test instance;
- the Y scalability data containing the average dispatching times for each test instance;
"""
if not self._preprocessed or self._plot_class != self.BENCHMARK_CLASS:
return None, None, None, None, None, None
else:
return self._simdata, self._schedtimes, self._mantimes, self._simmemory, self._scalabilitydataX, self._scalabilitydataY
def get_preprocessed_schedule_data(self):
"""
Returns all of the pre-processed schedule-related data.
A tuple is returned; each element of the tuple is related to a specific kind of metric that was processed.
Also, each element of the tuple is a list, with as many entries as the files that were processed, in the
same order. Each element of these lists contains then the data related to a specific metric, for a specific
test instance. All data is stored in standard Python lists.
:return: a tuple in which every element is a list containing, in each element, the data regarding one of the
test instances. The tuple contains, in this order:
- the slowdown values for jobs;
- the queue sizes for all time steps;
- the resource allocation efficiencies for all jobs;
- the X data regarding the load ratios (fraction of used nodes) for all time steps;
- the Y data regarding the load ratios (fraction of used resources) for all time steps;
"""
if not self._preprocessed or self._plot_class != self.SCHEDULE_CLASS:
return None, None, None, None, None
else:
return self._slowdowns, self._queuesizes, self._efficiencies, self._loadratiosX, self._loadratiosY
if __name__ == '__main__':
# This is an example. It should not be run from here, but from a script in the project's root (alongside
# basic_example.py), so that all imports can be resolved correctly.
resultpath = ['Path/to/benchmark/file',
'Path/to/benchmark/file2']
resultlabel = ['Label',
'Label2']
plots = PlotFactory('benchmark')
plots.set_files(resultpath, resultlabel)
plots.pre_process()
plots.produce_plot(type='scalability', title='My Scalability Plot')
| mit |
grocsvs/grocsvs | src/grocsvs/stages/refine_grid_search_breakpoints.py | 1 | 13463 | # FOR GRID SEARCH CANDIDATES
import itertools
import numpy
import os
import pandas
import scipy.stats
from grocsvs import step
from grocsvs import structuralvariants
from grocsvs.stages import sv_candidates
class CombineRefinedBreakpointsStep(step.StepChunk):
@staticmethod
def get_steps(options):
yield CombineRefinedBreakpointsStep(options)
def __init__(self, options):
self.options = options
def __str__(self):
return ".".join([self.__class__.__name__])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
paths = {
"refined_pairs": os.path.join(directory, "refined_pairs.tsv")
}
return paths
def run(self):
inputs = []
chroms = self.options.reference.chroms
for chromx, chromy in itertools.product(chroms, chroms):
if self.options.reference.compare_chroms(chromx, chromy) < 0: continue
input_step = RefineGridSearchBreakpointsStep(self.options, chromx, chromy)
inpath = input_step.outpaths(final=True)["refined_pairs"]
try:
inputs.append(pandas.read_table(inpath))
except pandas.io.common.EmptyDataError:
pass
if len(inputs) == 0:
raise Exception("No candidate SVs discovered.")
combined = pandas.concat(inputs)
combined["chromx"] = combined["chromx"].astype("string")
combined["chromy"] = combined["chromy"].astype("string")
combined.to_csv(self.outpaths(final=False)["refined_pairs"], sep="\t", index=False)
class RefineGridSearchBreakpointsStep(step.StepChunk):
"""
Takes the rough grid search (aka barcode overlaps) candidates, then performs
breakpoint refinement to find the best potential breakpoint in the expected
orientation
"""
# TODO: refactor so that this and the final refine breakpoints steps share
# most of the refinement code
@staticmethod
def get_steps(options):
chroms = options.reference.chroms
for chromx, chromy in itertools.product(chroms, chroms):
if options.reference.compare_chroms(chromx, chromy) < 0: continue
yield RefineGridSearchBreakpointsStep(options, chromx, chromy)
def __init__(self, options, chromx, chromy):
self.options = options
self.chromx = chromx
self.chromy = chromy
def __str__(self):
return ".".join([self.__class__.__name__,
self.chromx,
self.chromy])
def outpaths(self, final):
directory = self.results_dir if final \
else self.working_dir
paths = {
"refined_pairs": os.path.join(directory, "refined_pairs.{}.{}.tsv".format(self.chromx, self.chromy))
}
return paths
def run(self):
outpath = self.outpaths(final=False)["refined_pairs"]
events = self.load_events()
if len(events) > 0:
refined = refine_events(events, self.options, self.logger)
refined.to_csv(outpath, sep="\t", index=False)
else:
open(outpath, "w")
def load_events(self):
significant_events = []
cur_events = []
for sample, dataset in self.options.iter_10xdatasets():
if self.options.reference.compare_chroms(self.chromx, self.chromy) < 0: continue
input_step = sv_candidates.SVCandidatesStep(self.options, sample, dataset, self.chromx, self.chromy)
inpath = input_step.outpaths(final=True)["svs"]
try:
sample_events = pandas.read_table(inpath)
if len(sample_events) > 0:
cur_events.append(sample_events)
except pandas.io.common.EmptyDataError:
pass
if len(cur_events) > 0:
significant_events = combine_nearby_events(pandas.concat(cur_events))
significant_events = significant_events[["chromx", "x", "chromy", "y", "orientation"]]
significant_events["chromx"] = significant_events["chromx"].astype("string")
significant_events["chromy"] = significant_events["chromy"].astype("string")
return significant_events
else:
return []
def combine_nearby_events(table, max_distance=5000):
"""
2d-clustering of breakpoints (ie pairs of breakENDs)
"""
if len(table) == 0:
return table
combined_tables = []
table = table.reset_index(drop=True)
for orientation, cur_table in table.groupby("orientation"):
# it's already a copy, but this will suppress a warning
cur_table = cur_table.copy()
points = [(row.x, row.y, row.Index) for row in cur_table.itertuples()]
clusters = structuralvariants.do_free_clustering(points, max_dist=max_distance)
cur_table["cluster"] = 0
for i, cluster in enumerate(clusters):
for point in cluster:
cur_table.loc[point[2], "cluster"] = i
cur_combined = cur_table.groupby("cluster").aggregate(
{"chromx": lambda x:x.iloc[0],
"chromy": lambda x:x.iloc[0],
"x": numpy.mean,
"y": numpy.mean,
"orientation": lambda x:x.iloc[0],
})
combined_tables.append(cur_combined)
combined_table = pandas.concat(combined_tables, ignore_index=True)
combined_table["x"] = combined_table["x"].astype(int)
combined_table["y"] = combined_table["y"].astype(int)
return combined_table
def refine_events(events, options, logger):
# TODO: gah
refinement_dist1 = -20000
refinement_dist2 = 20000
refinement_extend = 20000
quantification_dist1 = -500
quantification_dist2 = 5000
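# The refinement pass scans a +/-20 kb window around each rough candidate to pin down the breakpoint
# coordinates; quantification then re-counts barcode support in a tighter window around the refined positions.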
good_bc_counts_by_dataset, barcode_frequencies_by_dataset = get_barcode_info(options)
results = []
count = 0
for i, event in events.iterrows():
print ">>>", i, event.dtypes
logger.log("{}:{}::{}:{}{}".format(event["chromx"], event["x"],
event["chromy"], event["y"],
event["orientation"]))
if count % 10 == 0:
logger.log("{} of {} ({:.0%})".format(count, len(events), count/float(len(events))))
count += 1
# First get better breakpoints
refined = refine_breakpoint(
event["chromx"], event["x"],
event["chromy"], event["y"],
event["orientation"], options,
refinement_dist1, refinement_dist2, refinement_extend)
if refined is None:
continue
newx, newy = refined
# Next quantify the event based on the better breakpoint loci
quantification = quantify_breakpoint(
event["chromx"], newx,
event["chromy"], newy,
event["orientation"],
options, good_bc_counts_by_dataset,
barcode_frequencies_by_dataset,
quantification_dist1, quantification_dist2)
quantification["original_x"] = event["x"]
quantification["original_y"] = event["y"]
results.append(quantification)
return pandas.DataFrame(results)
def get_shared_frags(options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2):
fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new(
options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2)
bcx = set(fragsx["bc"])
bcy = set(fragsy["bc"])
common_barcodes = bcx.intersection(bcy)
shared_fragsx = fragsx.loc[fragsx["bc"].isin(common_barcodes)]
shared_fragsy = fragsy.loc[fragsy["bc"].isin(common_barcodes)]
return shared_fragsx, shared_fragsy
def refine_breakpoint(chromx, x, chromy, y, orientation,
options, dist1, dist2, extend):
shared_fragsx = []
shared_fragsy = []
# because all we're concerned with for refinement is the fragments
# with common barcodes across the breakpoint, we'll do refinement
# with all datasets without worrying if a given dataset supports
# the event
for sample, dataset in options.iter_10xdatasets():
cur_fragsx, cur_fragsy = get_shared_frags(
options, sample, dataset, chromx, x, chromy, y, orientation, dist1, dist2)
shared_fragsx.append(cur_fragsx)
shared_fragsy.append(cur_fragsy)
shared_fragsx = pandas.concat(shared_fragsx)
shared_fragsy = pandas.concat(shared_fragsy)
if len(shared_fragsx) < 1:
return None
breakpointx = get_breakpoint(shared_fragsx, x, orientation[0], extend)
breakpointy = get_breakpoint(shared_fragsy, y, orientation[1], extend)
return breakpointx, breakpointy
def get_breakpoint(frags, pos, orientation, extend=20000):
density = numpy.zeros(extend*2)
for i, frag in frags.iterrows():
curstart = max(frag["start_pos"]-(pos-extend), 0)
curend = min(frag["end_pos"]-(pos-extend), len(density))
density[int(curstart):int(curend)] += 1
peaks = numpy.where(density>(0.9*density.max()))[0]
if orientation == "+":
peak = peaks[0]
elif orientation == "-":
peak = peaks[-1]
else:
raise Exception("unknown orientation: {}".format(orientation))
diff = density[peak] - density
dist = numpy.sqrt(numpy.abs(numpy.arange(len(density))-peak))
score = numpy.ma.masked_array(diff / dist.astype(float), mask=False)
score.mask[numpy.isnan(score)] = True
if orientation == "+":
score.mask[numpy.arange(0, peak)] = True
elif orientation == "-":
score.mask[numpy.arange(peak, len(score))] = True
else:
raise Exception("unknown orientation: {}".format(orientation))
breakpoint = numpy.where(score==score.max())[0][0]
breakpoint += pos - extend
return breakpoint
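# Illustrative sketch (not from the original source): a rough trace of the
# scoring heuristic above on a toy density profile, assuming extend is small
# enough to write out by hand.
#
#   density = [1, 2, 4, 4, 1, 1], orientation = "+"
#   peaks where density > 0.9 * max -> indices [2, 3]; "+" picks peak = 2
#   diff = density[peak] - density  -> [3, 2, 0, 0, 3, 3]
#   dist = sqrt(|i - peak|)         -> [1.41, 1, 0, 1, 1.41, 1.73]
#   score = diff / dist, with the nan at the peak and all indices left of
#   the peak masked -> candidates [0, 2.12, 1.73] at indices 3, 4, 5;
#   the argmax is index 4, i.e. the position just past the sharp drop in
#   fragment coverage, shifted back to genome coordinates via pos - extend.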
def get_barcode_info(options):
good_bc_counts_by_dataset = {}
barcode_frequencies_by_dataset = {}
for sample, dataset in options.iter_10xdatasets():
sample_info = options.sample_info(sample.name)
dataset_info = sample_info[dataset.id]
good_bc_counts_by_dataset[dataset.id] = dataset_info["good_bc_count"]
sample_info = options.sample_info(sample.name)
dataset_info = sample_info[dataset.id]
barcode_frequencies = dataset_info["barcode_read_totals"]
barcode_frequencies /= numpy.array(barcode_frequencies.sum()).astype(float)
barcode_frequencies = barcode_frequencies.values
barcode_frequencies_by_dataset[dataset.id] = barcode_frequencies
return good_bc_counts_by_dataset, barcode_frequencies_by_dataset
def quantify_breakpoint(chromx, x, chromy, y, orientation,
options, good_bc_counts_by_dataset, barcode_frequencies_by_dataset,
dist1, dist2, with_phasing=False):
cur_result = {}
cur_result["chromx"] = chromx
cur_result["new_x"] = x
cur_result["chromy"] = chromy
cur_result["new_y"] = y
cur_result["orientation"] = orientation
cur_result["shared"] = 0
cur_result["total"] = 0
for sample, dataset in options.iter_10xdatasets():
barcode_frequencies = barcode_frequencies_by_dataset[dataset.id]
fragsx, fragsy, merged = structuralvariants.get_supporting_fragments_new(
options, sample, dataset,
chromx, x, chromy, y, orientation, dist1, dist2,
with_phasing=with_phasing)
bcx = set(fragsx["bc"])
bcy = set(fragsy["bc"])
common_barcodes = bcx.intersection(bcy)
total_barcodes = bcx.union(bcy)
cur_result["{}_total".format(sample.name)] = len(total_barcodes)
if len(common_barcodes) < 1:
continue
good_bc_count = good_bc_counts_by_dataset[dataset.id]
contingency_table = numpy.array([[len(common_barcodes), len(bcx-bcy)],
[len(bcy-bcx), good_bc_count-len(total_barcodes)]])
p_fisher = scipy.stats.fisher_exact(contingency_table, alternative="greater")[1]
p_resampling = structuralvariants.score_event(
len(bcx), len(bcy), len(common_barcodes), barcode_frequencies, resamples=100)
cur_result["{}_shared".format(sample.name)] = len(common_barcodes)
cur_result["{}_p_fisher".format(sample.name)] = p_fisher
cur_result["{}_p_resampling".format(sample.name)] = p_resampling
if with_phasing:
cur_result["{}_x_hap0".format(sample.name)] = (merged["hap_x"].isin([0,2])).sum()
cur_result["{}_x_hap1".format(sample.name)] = (merged["hap_x"] == 1).sum()
cur_result["{}_y_hap0".format(sample.name)] = (merged["hap_y"].isin([0,2])).sum()
cur_result["{}_y_hap1".format(sample.name)] = (merged["hap_y"] == 1).sum()
# TODO: constants should be constant across steps!
if (p_resampling < 1e-4) and (len(common_barcodes)/float(len(total_barcodes)) > 0.10):
cur_result["shared"] += len(common_barcodes)
cur_result["total"] += len(total_barcodes)
cur_result["p_resampling"] = min(cur_result.get("{}_p_resampling".format(sample_name), 1.0)
for sample_name in options.samples)
return pandas.Series(cur_result)
| mit |
hugobowne/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
AMGitsKriss/Battlegrounds | battlegrounds_write_graph.py | 2 | 1492 | import pandas as pd
#Enumerate colors.
class COLOR:
RED = "tomato"
GREEN = "yellowgreen"
BLUE = "lightblue"
NEWLINE_INDENT = "\n "
def fill(color):
return f"[style=filled fillcolor=\"{color}\"]"
def dual_label(weapon, n):
return f"[label=\"{weapon}\" taillabel=\"{n}\"]"
def solo_node(player, color):
return f"{NEWLINE_INDENT}\"{player}\" {fill(color)};"
def inter_node(actor, victim, weapon, n):
return f"{NEWLINE_INDENT}\"{actor}\" -> \"{victim}\" {dual_label(weapon, n)};"
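# Rough illustration of the emitted DOT fragments (hypothetical player names,
# not from the original data):
#   solo_node("Alice", COLOR.RED)        -> "Alice" [style=filled fillcolor="tomato"];
#   inter_node("Alice", "Bob", "Pan", 3) -> "Alice" -> "Bob" [label="Pan" taillabel="3"];
# each prefixed with a newline and four-space indent via NEWLINE_INDENT.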
def digraphWrite(data, name):
print("Writing digraph code...")
with open(f"{name}.dot","w") as f:
f.write("digraph {")
        # We're rounding all the values to the nearest 100
# We need to define the colours first for them to work
for i in data.index:
row = data.iloc[i]
temp = ""
if(row['Deed'] == "died"):
if (row['Weapon'] == "Blue Zone"):
temp = solo_node(row['Player'], COLOR.BLUE)
else:
temp = solo_node(row['Player'], COLOR.RED)
elif(row['Deed'] == "won"):
temp = solo_node(row['Player'], COLOR.GREEN)
f.write(temp)
# Then we can define the graph edges
n = 0
for i in data.index:
row = data.iloc[i]
if(row['Deed'] == "killed"):
n += 1
f.write(inter_node(row['Player'], row['Target'], row['Weapon'], n))
f.write("\n}")
print(f"Outputted graph script to {name}.dot...")
def main():
data = pd.read_csv("battlegrounds.csv", low_memory=False)
digraphWrite(data, "kill_map")
# Load data
if __name__ == '__main__':
main()
| mit |
Clyde-fare/scikit-learn | sklearn/metrics/ranking.py | 79 | 25426 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.fixes import bincount
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<http://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
fpr, tpr, tresholds = roc_curve(y_true, y_score,
sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (np.all(classes == [0, 1]) or
np.all(classes == [-1, 1]) or
np.all(classes == [0]) or
np.all(classes == [-1]) or
np.all(classes == [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
# We need to use isclose to avoid spurious repeated thresholds
# stemming from floating point roundoff errors.
distinct_value_indices = np.where(np.logical_not(isclose(
np.diff(y_score), 0)))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = (y_true * weight).cumsum()[threshold_idxs]
if sample_weight is not None:
fps = weight.cumsum()[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
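# Illustrative sketch (not part of the original module): for
#   y_true  = [0, 0, 1, 1]
#   y_score = [0.1, 0.4, 0.35, 0.8]
# the scores sorted descending are 0.8(1), 0.4(0), 0.35(1), 0.1(0), so
#   fps        = [0, 1, 1, 2]
#   tps        = [1, 1, 2, 2]
#   thresholds = [0.8, 0.4, 0.35, 0.1]
# which is the common starting point for both precision_recall_curve and
# roc_curve below (compare with the roc_curve docstring example).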
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
    y axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int, optional (default=None)
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class or confidence values.
pos_label : int
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted array and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
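# Illustrative sketch (not part of the original module): with
#   y_true  = [[1, 0, 0], [0, 0, 1]]
#   y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]]
# the lowest-scored true label of sample 1 (score 0.75) is covered after 2 of
# the ranked labels, and that of sample 2 (score 0.1) only after all 3, so
# coverage_error(y_true, y_score) would be (2 + 3) / 2 == 2.5.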
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
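# Illustrative sketch (not part of the original module): for
#   y_true  = [[1, 0, 0], [0, 0, 1]]
#   y_score = [[0.75, 0.5, 1], [1, 0.2, 0.1]]
# sample 1 has one incorrectly ordered pair out of 1 * 2 possible pairs (0.5)
# and sample 2 has two out of two (1.0), so
# label_ranking_loss(y_true, y_score) would be (0.5 + 1.0) / 2 == 0.75.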
| bsd-3-clause |
rs2/pandas | pandas/io/sql.py | 1 | 62655 | """
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from contextlib import contextmanager
from datetime import date, datetime, time
from functools import partial
import re
from typing import Iterator, Optional, Union, overload
import warnings
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.common import is_datetime64tz_dtype, is_dict_like, is_list_like
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.missing import isna
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy # noqa: F811
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""Convert SQL and params args to DBAPI2.0 compliant format."""
args = [sql]
if params is not None:
if hasattr(params, "keys"): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _process_parse_dates_argument(parse_dates):
"""Process parse_dates argument for read_sql functions"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
elif not hasattr(parse_dates, "__iter__"):
parse_dates = [parse_dates]
return parse_dates
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors="ignore", **format)
else:
# Allow passing of formatting string for integers
# GH17855
if format is None and (
issubclass(col.dtype.type, np.floating)
or issubclass(col.dtype.type, np.integer)
):
format = "s"
if format in ["D", "d", "h", "m", "s", "ms", "us", "ns"]:
return to_datetime(col, errors="coerce", unit=format, utc=utc)
elif is_datetime64tz_dtype(col.dtype):
# coerce to UTC timezone
# GH11216
return to_datetime(col, utc=True)
else:
return to_datetime(col, errors="coerce", format=format, utc=utc)
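# Rough illustration (values invented for this sketch): with integer epoch
# seconds, _handle_date_column(Series([1262304000]), format="s") parses via
# to_datetime(..., unit="s") and yields 2010-01-01 00:00:00, while a
# strftime-style string such as format="%Y%m%d" falls through to the generic
# to_datetime call in the final branch.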
def _parse_date_columns(data_frame, parse_dates):
"""
Force non-datetime columns to be read as such.
Supports both string formatted and integer timestamp columns.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
# we want to coerce datetime64_tz dtypes for now to UTC
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.items():
if is_datetime64tz_dtype(df_col.dtype) or col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True, parse_dates=None):
"""Wrap result set of query in a DataFrame."""
frame = DataFrame.from_records(data, columns=columns, coerce_float=coerce_float)
frame = _parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
SQL query to be executed.
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by the
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
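# Illustrative usage (hypothetical table name and in-memory connection):
#
#   import sqlite3
#   conn = sqlite3.connect(":memory:")
#   execute("CREATE TABLE demo (a INTEGER)", conn)
#   execute("INSERT INTO demo VALUES (?)", conn, params=[1])
#
# With a DBAPI2 connection the params list is passed straight through to the
# driver, so the placeholder style (here sqlite3's "?") is driver-dependent.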
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_table(
table_name,
con,
schema=None,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL database table into a DataFrame.
Given a table name and a SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : str
Name of SQL table in database.
con : SQLAlchemy connectable or str
        A database URI could be provided as a str.
SQLite DBAPI connection mode not supported.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default None
List of column names to select from SQL table.
chunksize : int, default None
If specified, returns an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information will be converted to UTC.
Examples
--------
>>> pd.read_sql_table('table_name', 'postgres:///db_name') # doctest:+SKIP
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError(
"read_sql_table only supported for SQLAlchemy connectable."
)
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError as err:
raise ValueError(f"Table {table_name} not found") from err
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
if table is not None:
return table
else:
raise ValueError(f"Table {table_name} not found", con)
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql_query(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : str SQL query or SQLAlchemy Selectable (select or text object)
SQL query to be executed.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point. Useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC.
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: None = None,
) -> DataFrame:
...
@overload
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: int = 1,
) -> Iterator[DataFrame]:
...
def read_sql(
sql,
con,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
columns=None,
chunksize: Optional[int] = None,
) -> Union[DataFrame, Iterator[DataFrame]]:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
Parameters
----------
sql : str or SQLAlchemy Selectable (select or text object)
SQL query to be executed or a table name.
con : SQLAlchemy connectable, str, or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library. If a DBAPI2 object, only sqlite3 is supported. The user is responsible
for engine disposal and connection closure for the SQLAlchemy connectable; str
connections are closed automatically. See
`here <https://docs.sqlalchemy.org/en/13/core/connections.html>`_.
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
coerce_float : bool, default True
Attempts to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame or Iterator[DataFrame]
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
try:
_is_table_name = pandas_sql.has_table(sql)
except Exception:
# using generic exception to catch errors from sql drivers (GH24988)
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
else:
return pandas_sql.read_query(
sql,
index_col=index_col,
params=params,
coerce_float=coerce_float,
parse_dates=parse_dates,
chunksize=chunksize,
)
def to_sql(
frame,
name,
con,
schema=None,
if_exists="fail",
index=True,
index_label=None,
chunksize=None,
dtype=None,
method=None,
) -> None:
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame, Series
name : str
Name of SQL table.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : str, optional
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : str or sequence, optional
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, optional
Specify the number of rows in each batch to be written at a time.
By default, all rows will be written at once.
dtype : dict or scalar, optional
Specifying the datatype for columns. If a dictionary is used, the
keys should be the column names and the values should be the
SQLAlchemy types or strings for the sqlite3 fallback mode. If a
scalar is provided, it will be applied to all columns.
method : {None, 'multi', callable}, optional
Controls the SQL insertion clause used:
- None : Uses standard SQL ``INSERT`` clause (one per row).
- 'multi': Pass multiple values in a single ``INSERT`` clause.
- callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if if_exists not in ("fail", "replace", "append"):
raise ValueError(f"'{if_exists}' is not valid for if_exists")
pandas_sql = pandasSQL_builder(con, schema=schema)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError(
"'frame' argument should be either a Series or a DataFrame"
)
pandas_sql.to_sql(
frame,
name,
if_exists=if_exists,
index=index,
index_label=index_label,
schema=schema,
chunksize=chunksize,
dtype=dtype,
method=method,
)
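# Illustrative round trip (hypothetical frame and in-memory database):
#
#   import sqlite3
#   conn = sqlite3.connect(":memory:")
#   df = DataFrame({"name": ["a", "b"], "value": [1, 2]})
#   to_sql(df, "demo", conn, index=False)
#   read_sql_query("SELECT * FROM demo", conn)
#
# if_exists defaults to "fail", so a second to_sql call with the same table
# name would raise unless if_exists="replace" or "append" is passed.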
def has_table(table_name, con, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table.
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
else it just return con without modifying it.
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, str):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, schema=None, meta=None, is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters.
"""
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, str):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type conversions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(
self,
name,
pandas_sql_engine,
frame=None,
index=True,
if_exists="fail",
prefix="pandas",
index_label=None,
schema=None,
keys=None,
dtype=None,
):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError(f"Could not init table '{name}'")
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == "fail":
raise ValueError(f"Table '{self.name}' already exists.")
elif self.if_exists == "replace":
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == "append":
pass
else:
raise ValueError(f"'{self.if_exists}' is not valid for if_exists")
else:
self._execute_create()
def _execute_insert(self, conn, keys, data_iter):
"""
Execute SQL statement inserting data
Parameters
----------
conn : sqlalchemy.engine.Engine or sqlalchemy.engine.Connection
keys : list of str
Column names
data_iter : generator of list
Each item contains a list of values to be inserted
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(), data)
def _execute_insert_multi(self, conn, keys, data_iter):
"""
Alternative to _execute_insert for DBs support multivalue INSERT.
Note: multi-value insert is usually faster for analytics DBs
and tables containing a few columns
        but performance degrades quickly as the number of columns increases.
"""
data = [dict(zip(keys, row)) for row in data_iter]
conn.execute(self.table.insert(data))
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(f"duplicate name in index/columns: {err}") from err
else:
temp = self.frame
column_names = list(map(str, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
for i, (_, ser) in enumerate(temp.items()):
vals = ser._values
if vals.dtype.kind == "M":
d = vals.to_pydatetime()
elif vals.dtype.kind == "m":
# store as integers, see GH#6921, GH#7076
d = vals.view("i8").astype(object)
else:
d = vals.astype(object)
assert isinstance(d, np.ndarray), type(d)
if ser._can_hold_na:
# Note: this will miss timedeltas since they are converted to int
mask = isna(d)
d[mask] = None
data_list[i] = d
return column_names, data_list
def insert(self, chunksize=None, method=None):
# set insert method
if method is None:
exec_insert = self._execute_insert
elif method == "multi":
exec_insert = self._execute_insert_multi
elif callable(method):
exec_insert = partial(method, self)
else:
raise ValueError(f"Invalid parameter `method`: {method}")
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError("chunksize argument should be non-zero")
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
exec_insert(conn, keys, chunk_iter)
def _query_iterator(
self, result, chunksize, columns, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set."""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None, chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
for idx in self.index[::-1]:
cols.insert(0, self.table.c[idx])
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
column_names,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float
)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
f"levels, which is {nlevels}"
)
else:
return index_label
# return the used column labels for the index columns
if (
nlevels == 1
and "index" not in self.frame.columns
and self.frame.index.name is None
):
return ["index"]
else:
return [
l if l is not None else f"level_{i}"
for i, l in enumerate(self.frame.index.names)
]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, str):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(self.frame.index._get_level_values(i))
column_names_and_types.append((str(idx_label), idx_type, True))
column_names_and_types += [
(str(self.frame.columns[i]), dtype_mapper(self.frame.iloc[:, i]), False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Column, PrimaryKeyConstraint, Table
column_names_and_types = self._get_column_names_and_types(self._sqlalchemy_type)
columns = [
Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types
]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + "_pk")
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
Booleans are hard because converting bool column with None replaces
all Nones with false. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required.
"""
parse_dates = _process_parse_dates_argument(parse_dates)
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# Handle date parsing upfront; don't try to convert columns
# twice
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(df_col, format=fmt)
continue
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (
col_type is datetime
or col_type is date
or col_type is DatetimeTZDtype
):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype("int64") or col_type is bool:
self.frame[col_name] = df_col.astype(col_type, copy=False)
except KeyError:
pass # this column not in results
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
from sqlalchemy.types import (
TIMESTAMP,
BigInteger,
Boolean,
Date,
DateTime,
Float,
Integer,
Text,
Time,
)
if col_type == "datetime64" or col_type == "datetime":
# GH 9086: TIMESTAMP is the suggested type if the column contains
# timezone information
try:
if col.dt.tz is not None:
return TIMESTAMP(timezone=True)
except AttributeError:
# The column is actually a DatetimeIndex
# GH 26761 or an Index with date-like data e.g. 9999-01-01
if getattr(col, "tz", None) is not None:
return TIMESTAMP(timezone=True)
return DateTime
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
return BigInteger
elif col_type == "floating":
if col.dtype == "float32":
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == "integer":
if col.dtype == "int32":
return Integer
else:
return BigInteger
elif col_type == "boolean":
return Boolean
elif col_type == "date":
return Date
elif col_type == "time":
return Time
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import TIMESTAMP, Boolean, Date, DateTime, Float, Integer
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype("int64")
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
def to_sql(self, *args, **kwargs):
raise ValueError(
"PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection"
)
class SQLDatabase(PandasSQL):
"""
This class enables conversion between DataFrame and SQL databases
using SQLAlchemy to handle DataBase abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database flavor specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, "execute"):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execution_options(no_parameters=True).execute(
*args, **kwargs
)
def read_table(
self,
table_name,
index_col=None,
coerce_float=True,
parse_dates=None,
columns=None,
schema=None,
chunksize=None,
):
"""
Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database.
index_col : string, optional, default: None
Column to set as index.
coerce_float : boolean, default True
Attempts to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite.
columns : list, default: None
List of column names to select from SQL table.
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQL database object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(
coerce_float=coerce_float,
parse_dates=parse_dates,
columns=columns,
chunksize=chunksize,
)
@staticmethod
def _query_iterator(
result, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
parse_dates=None,
params=None,
chunksize=None,
):
"""
Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed.
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets.
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates.
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times, or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps.
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(
result,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = result.fetchall()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
read_sql = read_query
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table.
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column.
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
        method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import TypeEngine, to_instance
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError(f"The type of {col} is not a SQLAlchemy type")
table = SQLTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
schema=schema,
dtype=dtype,
)
table.create()
from sqlalchemy import exc
try:
table.insert(chunksize, method=method)
except exc.SQLAlchemyError as err:
# GH34431
msg = "(1054, \"Unknown column 'inf' in 'field list'\")"
err_text = str(err.orig)
if re.search(msg, err_text):
raise ValueError("inf cannot be used with MySQL") from err
else:
raise err
if not name.isdigit() and not name.islower():
# check for potentially case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema, connection=conn
)
if name not in table_names:
msg = (
f"The provided table name '{name}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table, name, schema or self.meta.schema
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get(".".join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
"string": "TEXT",
"floating": "REAL",
"integer": "INTEGER",
"datetime": "TIMESTAMP",
"date": "DATE",
"time": "TIME",
"boolean": "INTEGER",
}
def _get_unicode_name(name):
try:
uname = str(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError as err:
raise ValueError(f"Cannot convert identifier to UTF-8: '{name}'") from err
return uname
def _get_valid_sqlite_name(name):
# See https://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError("SQLite identifier cannot contain NULs")
return '"' + uname.replace('"', '""') + '"'
_SAFE_NAMES_WARNING = (
"The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to underscores."
)
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super().__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self, *, num_rows):
names = list(map(str, self.frame.columns))
wld = "?" # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
for idx in self.index[::-1]:
names.insert(0, idx)
bracketed_names = [escape(column) for column in names]
col_names = ",".join(bracketed_names)
row_wildcards = ",".join([wld] * len(names))
wildcards = ",".join(f"({row_wildcards})" for _ in range(num_rows))
insert_statement = (
f"INSERT INTO {escape(self.name)} ({col_names}) VALUES {wildcards}"
)
return insert_statement
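    # Illustrative output sketch for insert_statement (assumes a table named
    # "t" with columns "a" and "b", no index, and num_rows=2):
    #   INSERT INTO "t" ("a","b") VALUES (?,?),(?,?)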
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(num_rows=1), data_list)
def _execute_insert_multi(self, conn, keys, data_iter):
data_list = list(data_iter)
flattened_data = [x for row in data_list for x in row]
conn.execute(self.insert_statement(num_rows=len(data_list)), flattened_data)
def _create_table_setup(self):
"""
Return a list of SQL statements that creates a table reflecting the
structure of a DataFrame. The first entry will be a CREATE TABLE
statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = self._get_column_names_and_types(self._sql_type_name)
pat = re.compile(r"\s+")
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [
escape(cname) + " " + ctype for cname, ctype, _ in column_names_and_types
]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join(escape(c) for c in keys)
create_tbl_stmts.append(
f"CONSTRAINT {self.name}_pk PRIMARY KEY ({cnames_br})"
)
create_stmts = [
"CREATE TABLE "
+ escape(self.name)
+ " (\n"
+ ",\n ".join(create_tbl_stmts)
+ "\n)"
]
ix_cols = [cname for cname, _, is_index in column_names_and_types if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join(escape(c) for c in ix_cols)
create_stmts.append(
"CREATE INDEX "
+ escape("ix_" + self.name + "_" + cnames)
+ "ON "
+ escape(self.name)
+ " ("
+ cnames_br
+ ")"
)
return create_stmts
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
# Infer type of column, while ignoring missing values.
# Needed for inserting typed data containing NULLs, GH 8778.
col_type = lib.infer_dtype(col, skipna=True)
if col_type == "timedelta64":
warnings.warn(
"the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the database.",
UserWarning,
stacklevel=8,
)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError("Complex datatypes not supported")
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support SQLite connections (fallback without
SQLAlchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, is_cursor=False):
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except Exception:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
cur.execute(*args, **kwargs)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception as inner_exc: # pragma: no cover
ex = DatabaseError(
f"Execution failed on sql: {args[0]}\n{exc}\nunable to rollback"
)
raise ex from inner_exc
ex = DatabaseError(f"Execution failed on sql '{args[0]}': {exc}")
raise ex from exc
@staticmethod
def _query_iterator(
cursor, chunksize, columns, index_col=None, coerce_float=True, parse_dates=None
):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
            if isinstance(data, tuple):
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
def read_query(
self,
sql,
index_col=None,
coerce_float=True,
params=None,
parse_dates=None,
chunksize=None,
):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(
cursor,
chunksize,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(
data,
columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates,
)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(
self,
frame,
name,
if_exists="fail",
index=True,
index_label=None,
schema=None,
chunksize=None,
dtype=None,
method=None,
):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: string
Name of SQL table.
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if it does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
method : {None, 'multi', callable}, default None
Controls the SQL insertion clause used:
* None : Uses standard SQL ``INSERT`` clause (one per row).
* 'multi': Pass multiple values in a single ``INSERT`` clause.
* callable with signature ``(pd_table, conn, keys, data_iter)``.
Details and a sample callable implementation can be found in the
section :ref:`insert method <io.sql.method>`.
.. versionadded:: 0.24.0
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError(f"{col} ({my_type}) not a string")
table = SQLiteTable(
name,
self,
frame=frame,
index=index,
if_exists=if_exists,
index_label=index_label,
dtype=dtype,
)
table.create()
table.insert(chunksize, method)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = "?"
query = f"SELECT name FROM sqlite_master WHERE type='table' AND name={wld};"
return len(self.execute(query, [name]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = f"DROP TABLE {_get_valid_sqlite_name(name)}"
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(
table_name, self, frame=frame, index=False, keys=keys, dtype=dtype
)
return str(table.sql_schema())
def get_schema(frame, name, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
columns to use a primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
dtype : dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
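# Illustrative usage sketch for get_schema (hypothetical names; `df` and
# `engine` are not defined in this module):
#   ddl = get_schema(df, "my_table", keys="id", con=engine)
#   print(ddl)  # a CREATE TABLE statement matching df's dtypes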
| bsd-3-clause |
alanlhutchison/empirical-JTK_CYCLE-with-asymmetry | previous_files/jtk7.py | 1 | 19118 | #!/usr/bin/env python
"""
Created on April 20 2014
@author: Alan L. Hutchison, alanlhutchison@uchicago.edu, Aaron R. Dinner Group, University of Chicago
This script is one in a series of scripts for running empirical JTK_CYCLE analysis as described in
Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e 1004094. doi:10.1371/journal.pcbi.1004094
Please use ./jtk7.py -h to see the help screen for further instructions on running this script.
"""
VERSION="1.1"
from scipy.stats import kendalltau
from operator import itemgetter
import numpy as np
import sys
import argparse
import itertools as it
import time
#import matplotlib.pyplot as plt
#import matplotlib.cm as cm
from scipy.stats import norm
import os.path
def main(args):
fn = args.filename
prefix = args.prefix
fn_waveform = args.waveform
fn_period = args.period
fn_phase = args.phase
fn_width = args.width
fn_out = args.output
if fn_out == "DEFAULT":
if ".txt" in fn:
fn_out=fn.replace(".txt","_"+prefix+"_jtkout.txt")
else:
fn_out = fn+"_" +prefix + "_jtkout.txt"
print fn
add_on = 1
while os.path.isfile(fn_out):
print fn_out, "already exists, take evasive action!!!"
endstr = '.'+fn_out.split('.')[-1]
mid = '_'+str(add_on)+endstr
if add_on ==1:
fn_out = fn_out.replace(endstr,mid)
else:
midendstr = '_'+fn_out.split('_')[-1]
fn_out = fn_out.replace(midendstr,mid)
add_on = add_on + 1
waveforms = read_in_list(fn_waveform)
periods = read_in_list(fn_period)
phases = read_in_list(fn_phase)
widths = read_in_list(fn_width)
#fn_out = "\t".join(fn.replace("jtkprepared","").split(".")[0:-1])+"_jtkout_emprical.txt"
header,data = read_in(fn)
header,series = organize_data(header,data)
RealKen = KendallTauP()
#output = ["ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMin\tMax_Amp\tFC\tIQR_FC\tTau\tempP"]
Ps = []
with open(fn_out,'w') as g:
g.write("ID\tWaveform\tPeriod\tPhase\tAsymmetry\tMean\tStd_Dev\tMax\tMaxLoc\tMin\tMinLoc\tMax_Amp\tFC\tIQR_FC\tTau\tP\n")
for serie in series:
if [s for s in serie[1:] if s!="NA"]==[]:
name = [serie[0]]+["All_NA"]+[-10000]*10+[np.nan,np.nan]
else:
mmax,mmaxloc,mmin,mminloc,MAX_AMP=series_char(serie,header)
sIQR_FC=IQR_FC(serie)
smean = series_mean(serie)
sstd = series_std(serie)
sFC = FC(serie)
local_ps = []
for waveform in waveforms:
for period in periods:
for phase in phases:
for width in widths:
reference = generate_base_reference(header,waveform,period,phase,width)
geneID,tau,p = generate_mod_series(reference,serie,RealKen)
out_line = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmaxloc,mmin,mminloc,MAX_AMP,sFC,sIQR_FC,tau,p]
out_line = [str(l) for l in out_line]
g.write("\t".join(out_line)+"\n")
#local_ps = sorted(local_ps)
#best = min(local_ps)
#Ps.append(best)
#append_out(fn_out,best)
#name = [geneID,waveform,period,phase,width,smean,sstd,mmax,mmin,MAX_AMP,sFC,sIQR_FC,tau,empirical_p]
#name = [str(n) for n in name]
#print "\t".join(name)
#print time.asctime( time.localtime(time.time()) )
#output.append("\t".join(name))
#write_out(fn_out,Ps)
def append_out(fn_out,line):
line = [str(l) for l in line]
with open(fn_out,'a') as g:
g.write("\t".join(line)+"\n")
def write_out(fn_out,output):
with open(fn_out,'w') as g:
for line in output:
g.write(str(line)+"\n")
def is_number(s):
try:
float(s)
return True
except ValueError:
return False
def read_in_list(fn):
with open(fn,'r') as f:
lines = f.read().splitlines()
return lines
def read_in(fn):
"""Read in data to header and data"""
with open(fn,'r') as f:
data=[]
start_right=0
for line in f:
words = line.strip().split()
words = [word.strip() for word in words]
if words[0] == "#":
start_right = 1
header = words[1:]
else:
if start_right == 0:
print "Please enter file with header starting with #"
elif start_right == 1:
data.append(words)
return header, data
def organize_data(header,data):
"""
    Organize the list of lists such that genes with similar time-series holes are grouped together (for the null distribution calculation).
    Return a header ['#','ZTX','ZTY',...] and a list of lists [lists with similar holes (identical null distribution), [], [], []]
"""
L = data
for i in xrange(1,len(header)):
L=sorted(L, key=itemgetter(i))
return header,L
def generate_base_reference(header,waveform="cosine",period=24,phase=0,width=12):
"""
This will generate a waveform with a given phase and period based on the header,
"""
tpoints = []
ZTs = header
coef = 2.0 * np.pi / float(period)
w = float(width) * coef
for ZT in ZTs:
z = ZT[2:].split("_")[0]
tpoints.append( (float(z)-float(phase) ) * coef)
def trough(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = 1 + -x/w
elif x > w:
y = (x-w)/(2*np.pi - w)
return y
def cosine(x,w):
x = x % (2*np.pi)
w = w % (2*np.pi)
if x <= w:
y = np.cos(x/(w/np.pi))
elif x > w:
y = np.cos( (x+2.*(np.pi-w))*np.pi/ (2*np.pi - w) )
return y
if waveform == "cosine":
reference=[cosine(tpoint,w) for tpoint in tpoints]
elif waveform == "trough":
reference=[trough(tpoint,w) for tpoint in tpoints]
return reference
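# Worked example sketch for generate_base_reference (assumed numbers): with
# period=24, phase=0 and width=12, the header entry "ZT06" maps to
# (6 - 0) * 2*pi/24 = pi/2; since w = 12 * 2*pi/24 = pi, the cosine branch
# returns cos((pi/2) / (pi/pi)) = cos(pi/2) = 0.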
def IQR_FC(series):
qlo = __score_at_percentile__(series, 25)
qhi = __score_at_percentile__(series, 75)
if (qlo=="NA" or qhi=="NA"):
return "NA"
elif (qhi==0):
return 0
elif ( qlo==0):
return "NA"
else:
iqr = qhi/qlo
return iqr
def FC(series):
series=[float(s) if s!="NA" else 0 for s in series[1:] if s!="NA" ]
if series!=[]:
mmax = max(series)
mmin = min(series)
if mmin==0:
sFC = -10000
else:
sFC = mmax / mmin
else:
sFC = "NA"
return sFC
def series_char(fullseries,header):
"""Uses interquartile range to estimate amplitude of a time series."""
series=[float(s) for s in fullseries[1:] if s!="NA"]
head = [header[i] for i,s in enumerate(fullseries[1:]) if s!="NA"]
if series!=[]:
mmax = max(series)
#print series.index(mmax)
mmaxloc = head[series.index(mmax)]
mmin = min(series)
#print series.index(mmin)
mminloc = head[series.index(mmin)]
diff=mmax-mmin
else:
mmax = "NA"
mmaxloc = "NA"
mmin = "NA"
mminloc = "NA"
diff = "NA"
return mmax,mmaxloc,mmin,mminloc,diff
def series_mean(series):
"""Finds the mean of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.mean(series)
def series_std(series):
"""Finds the std dev of a timeseries"""
series = [float(s) for s in series[1:] if s!="NA"]
return np.std(series)
def __score_at_percentile__(ser, per):
ser = [float(se) for se in ser[1:] if se!="NA"]
if len(ser)<5:
score ="NA"
return score
else:
ser = np.sort(ser)
i = (per/100. * len(ser))
if (i % 1 == 0):
            score = ser[int(i)]
else:
interpolate = lambda a,b,frac: a + (b - a)*frac
score = interpolate(ser[int(i)], ser[int(i) + 1], i % 1)
return float(score)
def generate_mod_series(reference,series,RealKen):
"""
Takes the series from generate_base_null, takes the list from data, and makes a null
for each gene in data or uses the one previously calculated.
Then it runs Kendall's Tau on the exp. series against the null
"""
geneID = series[0]
values = series[1:]
binary = np.array([1.0 if value!="NA" else np.nan for value in values])
reference = np.array(reference)
temp = reference*binary
mod_reference = [value for value in temp if not np.isnan(value)]
mod_values = [float(value) for value in values if value!='NA']
if len(mod_values) < 3:
tau,p = np.nan,np.nan
elif mod_values.count(np.nan) == len(mod_values):
tau,p = np.nan,np.nan
elif mod_values.count(0) == len(mod_values):
tau,p = np.nan,np.nan
else:
tau,p=kendalltau(mod_values,mod_reference)
if not np.isnan(tau):
if len(mod_values) < 150:
pk = RealKen.pval(tau,len(mod_values))
if pk is not None:
p=pk
else:
p = p / 2.0
if tau < 0:
p = 1-p
return geneID,tau,p
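# Illustrative masking sketch for generate_mod_series (assumed values): for
# values = ['1.2', 'NA', '0.8', '2.3'] and reference = [r0, r1, r2, r3], the
# 'NA' entry is dropped from both sides, so Kendall's tau is computed on
# [1.2, 0.8, 2.3] versus [r0, r2, r3]; fewer than 3 usable points yields
# tau = p = nan instead.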
def __create_parser__():
p = argparse.ArgumentParser(
description="Python script for running empirical JTK_CYCLE with asymmetry search as described in Hutchison, Maienschein-Cline, and Chiang et al. Improved statistical methods enable greater sensitivity in rhythm detection for genome-wide data, PLoS Computational Biology 2015 11(3): e1004094. This script was written by Alan L. Hutchison, alanlhutchison@uchicago.edu, Aaron R. Dinner Group, University of Chicago.",
epilog="Please contact the correpsonding author if you have any questions.",
version=VERSION
)
#p.add_argument("-t", "--test",
# action='store_true',
# default=False,
# help="run the Python unittest testing suite")
p.add_argument("-o", "--output",
dest="output",
action='store',
metavar="filename string",
type=str,
default = "DEFAULT",
help="You want to output something. If you leave this blank, _jtkout.txt will be appended to your filename")
analysis = p.add_argument_group(title="JTK_CYCLE analysis options")
analysis.add_argument("-f", "--filename",
dest="filename",
action='store',
metavar="filename string",
type=str,
help='This is the filename of the data series you wish to analyze.\
                          The data should be tab-spaced. The first row should contain a # sign followed by the time points with either CT or ZT preceding the time point (such as ZT0 or ZT4). Longer or shorter prefixes will not work. The following rows should contain the gene/series ID followed by the values for every time point. Where values are not available NA should be put in its place.')
analysis.add_argument("-x","--prefix",
dest="prefix",
type=str,
metavar="string",
action='store',
default="",
help="string to be inserted in the output filename for this run")
analysis.add_argument("--waveform",
dest="waveform",
type=str,
metavar="filename string",
action='store',
default="cosine",
#choices=["waveform_cosine.txt","waveform_rampup.txt","waveform_rampdown.txt","waveform_step.txt","waveform_impulse.txt","waveform_trough.txt"],
help='Should be a file with waveforms you wish to search for listed in a single column separated by newlines.\
Options include cosine (dflt), trough')
analysis.add_argument("-w", "--width", "-a", "--asymmetry",
dest="width",
type=str,
metavar="filename string",
action='store',
default="widths_02-22.txt",
#choices=["widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"]
help='Should be a file with asymmetries (widths) you wish to search for listed in a single column separated by newlines.\
                          Provided files include "widths_02-22.txt","widths_04-20_by4.txt","widths_04-12-20.txt","widths_08-16.txt","width_12.txt"\nasymmetries=widths')
analysis.add_argument("-ph", "--phase",
dest="phase",
metavar="filename string",
type=str,
default="phases_00-22_by2.txt",
help='Should be a file with phases you wish to search for listed in a single column separated by newlines.\
Example files include "phases_00-22_by2.txt" or "phases_00-22_by4.txt" or "phases_00-20_by4.txt"')
analysis.add_argument("-p","--period",
dest="period",
metavar="filename string",
type=str,
action='store',
default="period_24.txt",
                          help='Should be a file with periods you wish to search for listed in a single column separated by newlines.\
Provided file is "period_24.txt"')
distribution = analysis.add_mutually_exclusive_group(required=False)
distribution.add_argument("-e", "--exact",
dest="harding",
action='store_true',
default=False,
help="use Harding's exact null distribution (dflt)")
distribution.add_argument("-n", "--normal",
dest="normal",
action='store_true',
default=False,
help="use normal approximation to null distribution")
return p
# instantiate class to precalculate distribution
# usage:
# K = KendallTauP()
# pval = K.pval(tau,n,two_tailed=True)
class KendallTauP:
def __init__(self,N=150):
# largest number of samples to precompute
self.N = N
Nint = self.N*(self.N-1)/2
# first allocate freq slots for largest sample array
# as we fill this in we'll save the results for smaller samples
# total possible number of inversions is Nint + 1
freqN = np.zeros(Nint + 1)
freqN[0] = 1.0
# save results at each step in freqs array
self.freqs = [np.array([1.0])]
for i in xrange(1,self.N):
last = np.copy(freqN)
for j in xrange(Nint+1):
# update each entry by summing over i entries to the left
freqN[j] += sum(last[max(0,j-i):j])
# copy current state into freqs array
# the kth entry of freqs should have 1+k*(k-1)/2 entries
self.freqs.append(np.copy(freqN[0:(1+(i+1)*i/2)]))
# turn freqs into cdfs
# distributions still with respect to number of inversions
self.cdfs = []
for i in xrange(self.N):
self.cdfs.append(np.copy(self.freqs[i]))
# turn into cumulative frequencies
for j in xrange(1,len(self.freqs[i])):
self.cdfs[i][j] += self.cdfs[i][j-1]
# convert freqs to probs
self.cdfs[i] = self.cdfs[i]/sum(self.freqs[i])
# plot exact distribution compared to normal approx
def plot(self,nlist):
colors = cm.Set1(np.linspace(0,1,len(nlist)))
# for plotting gaussian
x = np.linspace(-1.2,1.2,300)
# plot pdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = (ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = ((ntot+1.0)/2.0)*self.freqs[nlist[i]-1]/sum(self.freqs[nlist[i]-1])
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.pdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('pdfs.png')
plt.show()
# now plot cdfs
plt.figure()
for i in xrange(len(nlist)):
ntot = len(self.freqs[nlist[i]-1])-1
tauvals = -1.0*(ntot - 2.0*np.arange(len(self.freqs[nlist[i]-1])))/ntot
probs = self.cdfs[nlist[i]-1]
plt.scatter(tauvals,probs,color=colors[i])
# now plot gaussian comparison
var = 2.0*(2.0*nlist[i]+5.0)/(nlist[i]*(nlist[i]-1)*9.0)
plt.plot(x,norm.cdf(x,0.0,np.sqrt(var)),color=colors[i])
plt.legend(nlist,loc='best')
# plt.savefig('cdfs.png')
plt.show()
# use cdfs to return pval
# default to return two tailed pval
def pval(self,tau,n,two_tailed=False):
# enforce tau is between -1 and 1
if tau <= -1.000001 or tau >= 1.000001:
sys.stderr.write(str(type(tau))+"\n")
sys.stderr.write(str(tau)+"\n")
sys.stderr.write("invalid tau\n")
#print 'invalid tau'
return None
# enforce n is less than our precomputed quantities
if n > self.N:
#print 'n is too large'
            sys.stderr.write("n is too large\n")
return None
# convert tau to value in terms of number of inversions
ntot = n*(n-1)/2
inv_score = int(round((ntot - tau * ntot)/2.0))
# I'm a little worried about the precision of this,
# but probably not enough to be really worried for reasonable n
# since we really only need precision to resolve ntot points
# if two tailed, we're getting a tail from a symmetric dist
min_inv_score = min(inv_score,ntot-inv_score)
if two_tailed:
pval = self.cdfs[n-1][min_inv_score]*2.0
else:
# if one tailed return prob of getting that or fewer inversions
pval = self.cdfs[n-1][inv_score]
# if inv_score is 0, might have larger than 0.5 prob
return min(pval,1.0)
if __name__=="__main__":
parser = __create_parser__()
args = parser.parse_args()
main(args)
| mit |
etkirsch/scikit-learn | examples/svm/plot_custom_kernel.py | 171 | 1546 | """
======================
SVM with custom kernel
======================
Simple usage of Support Vector Machines to classify a sample. It will
plot the decision surface and the support vectors.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
Y = iris.target
def my_kernel(X, Y):
"""
We create a custom kernel:
(2 0)
k(X, Y) = X ( ) Y.T
(0 1)
"""
M = np.array([[2, 0], [0, 1.0]])
return np.dot(np.dot(X, M), Y.T)
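# Worked example (assumed vectors): for X = [[1, 2]] and Y = [[3, 4]],
# X.M = [[2, 2]], so k(X, Y) = 2*3 + 2*4 = [[14]].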
h = .02 # step size in the mesh
# we create an instance of SVM and fit out data.
clf = svm.SVC(kernel=my_kernel)
clf.fit(X, Y)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, m_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.pcolormesh(xx, yy, Z, cmap=plt.cm.Paired)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.title('3-Class classification using Support Vector Machine with custom'
' kernel')
plt.axis('tight')
plt.show()
| bsd-3-clause |
google/telluride_decoding | test/scaled_lda_test.py | 1 | 6747 | # Copyright 2020 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for telluride_decoding.scaled_lda."""
import os
from absl.testing import absltest
import matplotlib.pyplot as plt
import numpy as np
from telluride_decoding import scaled_lda
class ScaledLdaTest(absltest.TestCase):
def test_one_dimensional_data(self):
num_points = 1000
d1 = np.random.randn(num_points,) - 5
d2 = np.random.randn(num_points,) + 5
lda = scaled_lda.ScaledLinearDiscriminantAnalysis()
lda.fit_two_classes(d1, d2)
d1_transformed = lda.transform(d1)
self.assertAlmostEqual(np.mean(d1_transformed), 0)
d2_transformed = lda.transform(d2)
self.assertAlmostEqual(np.mean(d2_transformed), 1)
def test_two_dimensional_data(self):
num_points = 1000
num_dims = 2
d1 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [-2, 1]
d2 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [2, -1]
# Plot the original data.
plt.clf()
plt.subplot(2, 1, 1)
plt.plot(d1[:, 0], d1[:, 1], 'rx')
plt.plot(d2[:, 0], d2[:, 1], 'bo')
plt.title('Original Data')
x = np.concatenate((d1, d2), axis=0)
y = np.concatenate((np.ones(d1.shape[0])*42,
np.ones(d2.shape[0])*-12))
lda = scaled_lda.LinearDiscriminantAnalysis()
with self.assertRaisesRegex(
ValueError, 'Must fit the model before transforming.'):
lda.transform(d1)
with self.assertRaisesRegex(
ValueError, 'Must fit the model before transforming.'):
lda.explained_variance_ratio()
x_lda = lda.fit_transform(x, y)
labels = lda.labels
self.assertLen(labels, 2)
# Plot the transformed data.
plt.subplot(2, 1, 2)
plt.plot(x_lda[y == labels[0], 0], x_lda[y == labels[0], 1], 'rx')
plt.plot(x_lda[y == labels[1], 0], x_lda[y == labels[1], 1], 'bo')
    plt.title('Transformed Data')
# Make sure the transformed centers are symmetric on the first (x) axis.
mean_vectors = [np.reshape(v, (1, -1)) for v in lda.mean_vectors]
centers = lda.transform(np.concatenate(mean_vectors, axis=0))
print('Transformed centers are:', centers)
self.assertAlmostEqual(centers[0, 0], -centers[1, 0], delta=0.1)
np.testing.assert_allclose(centers[:, 1], [0., 0.], atol=0.1)
plt.savefig(os.path.join(os.environ.get('TMPDIR') or '/tmp',
'scaled_lda.png'))
with self.assertRaisesRegex(
TypeError, 'Inconsistent training and transform sizes'):
lda.transform(d1[:, 0:1])
# Now test model from saved parameters
nlda = scaled_lda.LinearDiscriminantAnalysis()
nlda.model_parameters = lda.model_parameters # Get/set parameters test
centers = nlda.transform(np.concatenate(mean_vectors, axis=0))
self.assertAlmostEqual(centers[0, 0], -centers[1, 0], delta=0.1)
np.testing.assert_allclose(centers[:, 1], [0., 0.], atol=0.1)
def test_fitted_data(self):
"""Makes sure we can generate a fitted model with .from_fitted_data.
"""
num_points = 1000
num_dims = 2
d1 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [-2, 1]
d2 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [2, -1]
x = np.concatenate((d1, d2), axis=0)
y = np.concatenate((np.ones(d1.shape[0])*42,
np.ones(d2.shape[0])*-12))
lda = scaled_lda.LinearDiscriminantAnalysis.from_fitted_data(x, y)
explained = lda.explained_variance_ratio()
np.testing.assert_allclose(explained, [1., 0.], atol=1e-8)
def test_three_class_data(self):
num_points = 1000
num_dims = 2
d1 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [-2, 1]
d2 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]]) + [2, -1]
d3 = np.matmul(np.random.randn(num_points, num_dims),
[[2, 0], [0, 0.5]])
x = np.concatenate((d1, d2, d3), axis=0)
y = np.concatenate((np.ones(d1.shape[0])*42,
np.ones(d2.shape[0])*-12,
np.ones(d3.shape[0])))
lda = scaled_lda.LinearDiscriminantAnalysis()
x_lda = lda.fit_transform(x, y)
self.assertEqual(x_lda.shape[0], 3*num_points)
self.assertEqual(x_lda.shape[1], 2) # Only two dimensional data.
labels = lda.labels
self.assertLen(labels, 3)
def test_four_dimensional_data(self):
num_points = 1000
num_dims = 4
center = np.array([-2, 1, 3, 2]) # Arbitrary
m1 = np.random.randn(num_points, num_dims) + center
m2 = np.random.randn(num_points, num_dims) + -center
x = np.concatenate((m1, m2), axis=0)
y = np.concatenate((np.ones(m1.shape[0])*0,
np.ones(m2.shape[0])*1.0))
slda = scaled_lda.ScaledLinearDiscriminantAnalysis()
slda.fit_two_classes(m1, m2)
m_lda = slda.transform(x)
self.assertEqual(m_lda.shape, (2*num_points, 2))
self.assertEqual(slda.coef_array.shape[0], num_dims)
self.assertLen(slda.labels, slda.coef_array.shape[1])
mean_vectors = [np.reshape(v, (1, -1)) for v in slda.mean_vectors]
centers = slda.transform(np.concatenate(mean_vectors, axis=0))[:, 0]
np.testing.assert_allclose(centers, [0., 1.0], atol=1e-8)
explained = slda.explained_variance_ratio()
np.testing.assert_allclose(explained, [1., 0., 0., 0.], atol=1e-8)
# Now test save and restoring parameters.
param_dict = slda.model_parameters
nlda = scaled_lda.ScaledLinearDiscriminantAnalysis()
nlda.model_parameters = param_dict
mean_vectors = [np.reshape(v, (1, -1)) for v in nlda.mean_vectors]
centers = nlda.transform(np.concatenate(mean_vectors, axis=0))[:, 0]
np.testing.assert_allclose(centers, [0., 1.0], atol=1e-8)
# Make sure we fail with more than two classes.
with self.assertRaisesRegex(
ValueError, 'Scaled LDA can only be done on two-class data.'):
y[0:2] = 42
slda.fit_transform(x, y)
if __name__ == '__main__':
absltest.main()
| apache-2.0 |
jeffzheng1/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
DANA-Laboratory/CoolProp | Web/scripts/fluid_properties.Incompressibles.py | 3 | 6445 | from __future__ import print_function, division
import os.path
import CoolProp
import CoolProp.CoolProp
import subprocess
import sys
import numpy as np
import matplotlib
matplotlib.use('Agg') #Force mpl to use a non-GUI backend
import matplotlib.pyplot as plt
web_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
#plots_path = os.path.join(web_dir,'fluid_properties','incompressibles_consistency')
plots_path = os.path.join(web_dir,'scripts','incompressibles_consistency')
checked = ["TVP1869", "T66"]
N = 50
p = 100e5
Pr = np.empty(N)
la = np.empty(N)
mu = np.empty(N)
cp = np.empty(N)
fig = plt.figure(tight_layout=True)
Pr_axis = fig.add_subplot(221)
la_axis = fig.add_subplot(222)
mu_axis = fig.add_subplot(223)
cp_axis = fig.add_subplot(224)
#Pr_axis = plt.subplot2grid((3,2), (0,0), rowspan=3)
#la_axis = plt.subplot2grid((3,2), (0,1))
#mu_axis = plt.subplot2grid((3,2), (1,1))
#cp_axis = plt.subplot2grid((3,2), (2,1))
Pr_axis.set_xlabel("Temperature $T$ / deg C")
Pr_axis.set_ylabel("Prandtl Number $Pr$")
#Pr_axis.set_ylim([0,10000])
#Pr_axis.set_yscale("log")
la_axis.set_xlabel("Temperature $T$ / deg C")
la_axis.set_ylabel("Thermal Conductivity $\lambda$ / W/m/K")
#la_axis.set_ylim([0,1])
mu_axis.set_xlabel("Temperature $T$ / deg C")
mu_axis.set_ylabel("Dynamic Viscosity $\mu$ / Pa s")
#mu_axis.set_ylim([0,1])
#mu_axis.set_yscale("log")
cp_axis.set_xlabel("Temperature $T$ / deg C")
cp_axis.set_ylabel("Isobaric Heat Capacity $c_p$ / J/kg/K")
#cp_axis.set_ylim([0,5000])
for fluid in CoolProp.__incompressibles_pure__ + CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_solution__:
#for fluid in CoolProp.__incompressibles_pure__:
skip_fluid = False
for ignored in ["example","iceea","icena","icepg"]:
if ignored in fluid.lower():
skip_fluid = True
if skip_fluid:
continue
state = CoolProp.AbstractState("INCOMP",fluid)
error = ""
for frac in [0.5,0.2,0.8,0.1,0.9]:
error = ""
try:
state.set_mass_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
try:
state.set_volu_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
try:
state.set_mole_fractions([frac])
state.update(CoolProp.PT_INPUTS,p,state.Tmax())
break
except Exception as e:
error = e.message
pass
Tmin = 0.0
try:
Tmin = state.keyed_output(CoolProp.iT_freeze)
except:
pass
Tmin = max(state.Tmin(), Tmin)+1
Tmax = state.Tmax()
T = np.linspace(Tmin,Tmax, N)
for i, Ti in enumerate(T):
state.update(CoolProp.PT_INPUTS, p, Ti)
Pr[i] = state.Prandtl()
la[i] = state.conductivity()
mu[i] = state.viscosity()
cp[i] = state.cpmass()
#print(np.min(Pr), np.max(Pr))
Pr_axis.plot(T-273.15,Pr)
la_axis.plot(T-273.15,la)
mu_axis.plot(T-273.15,mu)
cp_axis.plot(T-273.15,cp)
if np.max(Pr)>10000:
if fluid not in checked:
print("Very high Prandtl number for {0:s} of {1:f}".format(fluid,np.max(Pr)))
if np.min(Pr)<0.0:
if fluid not in checked:
print("Very low Prandtl number for {0:s} of {1:f}".format(fluid,np.min(Pr)))
if np.max(la)>0.8:
if fluid not in checked:
print("Very high thermal conductivity for {0:s} of {1:f}".format(fluid,np.max(la)))
if np.min(la)<0.3:
if fluid not in checked:
print("Very low thermal conductivity for {0:s} of {1:f}".format(fluid,np.min(la)))
if np.max(mu)>0.2:
if fluid not in checked:
print("Very high viscosity for {0:s} of {1:f}".format(fluid,np.max(mu)))
if np.min(mu)<1e-8:
if fluid not in checked:
print("Very low viscosity for {0:s} of {1:f}".format(fluid,np.min(mu)))
if np.max(cp)>5000:
if fluid not in checked:
print("Very high heat capacity for {0:s} of {1:f}".format(fluid,np.max(cp)))
if np.min(cp)<1000:
if fluid not in checked:
print("Very low heat capacity for {0:s} of {1:f}".format(fluid,np.min(cp)))
#for fluid in CoolProp.__fluids__:
for fluid in ["Water"]:
state = CoolProp.AbstractState("HEOS",fluid)
Tmin = max(state.Tmin(), Pr_axis.get_xlim()[0]+273.15)
Tmax = min(state.Tmax(), Pr_axis.get_xlim()[1]+273.15)
T = np.linspace(Tmin, Tmax, N)
for i, Ti in enumerate(T):
try:
state.update(CoolProp.QT_INPUTS, 0, Ti)
p = state.p() + 1e5
except:
p = state.p_critical() + 1e5
Pr[i] = np.nan
la[i] = np.nan
mu[i] = np.nan
cp[i] = np.nan
try:
state.update(CoolProp.PT_INPUTS, p, Ti)
try:
Pr[i] = state.Prandtl()
except Exception as e:
print(e.message)
try:
la[i] = state.conductivity()
except Exception as e:
print(e.message)
try:
mu[i] = state.viscosity()
except Exception as e:
print(e.message)
try:
cp[i] = state.cpmass()
except Exception as e:
print(e.message)
except:
pass
#print(np.min(Pr), np.max(Pr))
if np.sum(np.isnan(Pr)) == 0:
Pr_axis.plot(T-273.15,Pr,alpha=0.5,ls=":")
else:
#print("Error: Prandtl undefined for "+fluid)
pass
if np.sum(np.isnan(la)) == 0:
la_axis.plot(T-273.15,la,alpha=0.5,ls=":")
else:
#print("Error: Conductivuty undefined for "+fluid)
pass
if np.sum(np.isnan(mu)) == 0:
mu_axis.plot(T-273.15,mu,alpha=0.5,ls=":")
else:
#print("Error: Viscosity undefined for "+fluid)
pass
if np.sum(np.isnan(cp)) == 0:
cp_axis.plot(T-273.15,cp,alpha=0.5,ls=":")
else:
#print("Error: Heat capacity undefined for "+fluid)
pass
fig.tight_layout()
fig.savefig(plots_path+'.pdf')
#fig.savefig(plots_path+'.png')
sys.exit(0)
| mit |
JaviMerino/trappy | trappy/thermal.py | 2 | 9812 | # Copyright 2015-2016 ARM Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Process the output of the power allocator trace in the current
directory's trace.dat"""
from collections import OrderedDict
import pandas as pd
import re
from trappy.base import Base
from trappy.dynamic import register_ftrace_parser
class Thermal(Base):
"""Process the thermal framework data in a FTrace dump"""
unique_word = "thermal_temperature:"
"""The unique word that will be matched in a trace line"""
name = "thermal"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, control_temperature=None, title="", width=None,
height=None, ylim="range", ax=None, legend_label=""):
"""Plot the temperature.
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param legend_label: Label for the legend
:type legend_label: str
:param title: The title of the plot
:type title: str
:param control_temperature: If control_temp is a
:mod:`pd.Series` representing the (possible)
variation of :code:`control_temp` during the
run, draw it using a dashed yellow line.
Otherwise, only the temperature is plotted.
:type control_temperature: :mod:`pandas.Series`
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
title = normalize_title("Temperature", title)
if len(self.data_frame) == 0:
raise ValueError("Empty DataFrame")
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(self.data_frame["temp"] / 1000).plot(ax=ax, label=temp_label)
if control_temperature is not None:
ct_label = normalize_title("Control", legend_label)
control_temperature.plot(ax=ax, color="y", linestyle="--",
label=ct_label)
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_temperature_hist(self, ax, title):
"""Plot a temperature histogram
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title, plot_hist
temps = self.data_frame["temp"] / 1000
title = normalize_title("Temperature", title)
xlim = (0, temps.max())
plot_hist(temps, ax, title, "C", 30, "Temperature", xlim, "default")
register_ftrace_parser(Thermal, "thermal")
class ThermalGovernor(Base):
"""Process the power allocator data in a ftrace dump"""
unique_word = "thermal_power_allocator:"
"""The unique word that will be matched in a trace line"""
name = "thermal_governor"
"""The name of the :mod:`pandas.DataFrame` member that will be created in a
:mod:`trappy.ftrace.FTrace` object"""
pivot = "thermal_zone_id"
"""The Pivot along which the data is orthogonal"""
def plot_temperature(self, title="", width=None, height=None, ylim="range",
ax=None, legend_label=""):
"""Plot the temperature"""
from matplotlib import pyplot as plt
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
curr_temp = dfr["current_temperature"]
control_temp_series = (curr_temp + dfr["delta_temperature"]) / 1000
title = normalize_title("Temperature", title)
setup_plot = False
if not ax:
ax = pre_plot_setup(width, height)
setup_plot = True
temp_label = normalize_title("Temperature", legend_label)
(curr_temp / 1000).plot(ax=ax, label=temp_label)
control_temp_series.plot(ax=ax, color="y", linestyle="--",
label="control temperature")
if setup_plot:
post_plot_setup(ax, title=title, ylim=ylim)
plt.legend()
def plot_input_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot input power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match("req_power[0-9]+", s)]
plot_dfr = dfr[in_cols]
# Rename the columns from "req_power0" to "A15" or whatever is
# in actor_order. Note that we can do it just with an
# assignment because the columns are already sorted (i.e.:
# req_power0, req_power1...)
plot_dfr.columns = actor_order
title = normalize_title("Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_weighted_input_power(self, actor_weights, title="", width=None,
height=None, ax=None):
"""Plot weighted input power
:param actor_weights: An array of tuples. First element of the
tuple is the name of the actor, the second is the weight. The
array is in the same order as the :code:`req_power` appear in the
trace.
:type actor_weights: list
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
dfr = self.data_frame
in_cols = [s for s in dfr.columns if re.match(r"req_power\d+", s)]
plot_dfr_dict = OrderedDict()
for in_col, (name, weight) in zip(in_cols, actor_weights):
plot_dfr_dict[name] = dfr[in_col] * weight / 1024
plot_dfr = pd.DataFrame(plot_dfr_dict)
title = normalize_title("Weighted Input Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_output_power(self, actor_order, title="", width=None, height=None,
ax=None):
"""Plot output power
:param ax: Axis instance
:type ax: :mod:`matplotlib.Axis`
:param title: The title of the plot
:type title: str
:param width: The width of the plot
:type width: int
:param height: The height of the plot
        :type height: int
:param actor_order: An array showing the order in which the actors
were registered. The array values are the labels that
will be used in the input and output power plots.
For Example:
::
["GPU", "A15", "A7"]
:type actor_order: list
"""
from trappy.plot_utils import normalize_title, pre_plot_setup, post_plot_setup
out_cols = [s for s in self.data_frame.columns
if re.match("granted_power[0-9]+", s)]
# See the note in plot_input_power()
plot_dfr = self.data_frame[out_cols]
plot_dfr.columns = actor_order
title = normalize_title("Output Power", title)
if not ax:
ax = pre_plot_setup(width, height)
plot_dfr.plot(ax=ax)
post_plot_setup(ax, title=title)
def plot_inout_power(self, title=""):
"""Make multiple plots showing input and output power for each actor
:param title: The title of the plot
:type title: str
"""
from trappy.plot_utils import normalize_title
dfr = self.data_frame
actors = []
for col in dfr.columns:
match = re.match("P(.*)_in", col)
if match and col != "Ptot_in":
actors.append(match.group(1))
for actor in actors:
cols = ["P" + actor + "_in", "P" + actor + "_out"]
this_title = normalize_title(actor, title)
dfr[cols].plot(title=this_title)
register_ftrace_parser(ThermalGovernor, "thermal")
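# A minimal usage sketch for the parsers registered above (illustrative only;
# the trace path is a hypothetical placeholder and it is assumed that FTrace is
# exposed at the trappy package level):
#
#     import trappy
#     ftrace = trappy.FTrace("/path/to/trace_dir")  # directory containing trace.dat
#     ftrace.thermal.data_frame.head()              # parsed thermal_temperature events
#     ftrace.thermal.plot_temperature()
#     ftrace.thermal_governor.plot_input_power(["GPU", "A15", "A7"])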
| apache-2.0 |
fengzhyuan/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison for decision boundary generated on iris dataset
between Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
ngoix/OCRF | sklearn/linear_model/ransac.py | 14 | 17163 | # coding: utf-8
# Author: Johannes Schönberger
#
# License: BSD 3 clause
import numpy as np
import warnings
from ..base import BaseEstimator, MetaEstimatorMixin, RegressorMixin, clone
from ..utils import check_random_state, check_array, check_consistent_length
from ..utils.random import sample_without_replacement
from ..utils.validation import check_is_fitted
from .base import LinearRegression
from ..utils.validation import has_fit_parameter
_EPSILON = np.spacing(1)
def _dynamic_max_trials(n_inliers, n_samples, min_samples, probability):
"""Determine number trials such that at least one outlier-free subset is
sampled for the given inlier/outlier ratio.
Parameters
----------
n_inliers : int
Number of inliers in the data.
n_samples : int
Total number of samples in the data.
min_samples : int
Minimum number of samples chosen randomly from original data.
probability : float
Probability (confidence) that one outlier-free sample is generated.
Returns
-------
trials : int
Number of trials.
"""
inlier_ratio = n_inliers / float(n_samples)
nom = max(_EPSILON, 1 - probability)
denom = max(_EPSILON, 1 - inlier_ratio ** min_samples)
if nom == 1:
return 0
if denom == 1:
return float('inf')
return abs(float(np.ceil(np.log(nom) / np.log(denom))))
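# Worked example of the formula above (values chosen purely for illustration):
# with n_inliers=80, n_samples=100, min_samples=2 and probability=0.99 the
# inlier ratio is 0.8, so nom = 1 - 0.99 = 0.01 and denom = 1 - 0.8**2 = 0.36,
# giving ceil(log(0.01) / log(0.36)) = ceil(4.51) = 5; that is,
# _dynamic_max_trials(80, 100, 2, 0.99) returns 5.0.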
class RANSACRegressor(BaseEstimator, MetaEstimatorMixin, RegressorMixin):
"""RANSAC (RANdom SAmple Consensus) algorithm.
RANSAC is an iterative algorithm for the robust estimation of parameters
from a subset of inliers from the complete data set. More information can
be found in the general documentation of linear models.
A detailed description of the algorithm can be found in the documentation
of the ``linear_model`` sub-package.
Read more in the :ref:`User Guide <ransac_regression>`.
Parameters
----------
base_estimator : object, optional
Base estimator object which implements the following methods:
* `fit(X, y)`: Fit model to given training data and target values.
* `score(X, y)`: Returns the mean accuracy on the given test data,
which is used for the stop criterion defined by `stop_score`.
Additionally, the score is used to decide which of two equally
large consensus sets is chosen as the better one.
If `base_estimator` is None, then
``base_estimator=sklearn.linear_model.LinearRegression()`` is used for
target values of dtype float.
Note that the current implementation only supports regression
estimators.
min_samples : int (>= 1) or float ([0, 1]), optional
Minimum number of samples chosen randomly from original data. Treated
as an absolute number of samples for `min_samples >= 1`, treated as a
        relative number `ceil(min_samples * X.shape[0])` for
`min_samples < 1`. This is typically chosen as the minimal number of
samples necessary to estimate the given `base_estimator`. By default a
``sklearn.linear_model.LinearRegression()`` estimator is assumed and
`min_samples` is chosen as ``X.shape[1] + 1``.
residual_threshold : float, optional
Maximum residual for a data sample to be classified as an inlier.
By default the threshold is chosen as the MAD (median absolute
deviation) of the target values `y`.
is_data_valid : callable, optional
This function is called with the randomly selected data before the
model is fitted to it: `is_data_valid(X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
is_model_valid : callable, optional
This function is called with the estimated model and the randomly
selected data: `is_model_valid(model, X, y)`. If its return value is
False the current randomly chosen sub-sample is skipped.
Rejecting samples with this function is computationally costlier than
with `is_data_valid`. `is_model_valid` should therefore only be used if
the estimated model is needed for making the rejection decision.
max_trials : int, optional
Maximum number of iterations for random sample selection.
stop_n_inliers : int, optional
Stop iteration if at least this number of inliers are found.
stop_score : float, optional
Stop iteration if score is greater equal than this threshold.
stop_probability : float in range [0, 1], optional
RANSAC iteration stops if at least one outlier-free set of the training
        data is sampled in RANSAC. This requires generating at least N
samples (iterations)::
N >= log(1 - probability) / log(1 - e**m)
where the probability (confidence) is typically set to high value such
as 0.99 (the default) and e is the current fraction of inliers w.r.t.
the total number of samples.
residual_metric : callable, optional
Metric to reduce the dimensionality of the residuals to 1 for
multi-dimensional target values ``y.shape[1] > 1``. By default the sum
of absolute differences is used::
lambda dy: np.sum(np.abs(dy), axis=1)
NOTE: residual_metric is deprecated from 0.18 and will be removed in 0.20
Use ``loss`` instead.
loss: string, callable, optional, default "absolute_loss"
String inputs, "absolute_loss" and "squared_loss" are supported which
find the absolute loss and squared loss per sample
respectively.
If ``loss`` is a callable, then it should be a function that takes
two arrays as inputs, the true and predicted value and returns a 1-D
array with the ``i``th value of the array corresponding to the loss
on `X[i]`.
If the loss on a sample is greater than the ``residual_threshold``, then
this sample is classified as an outlier.
random_state : integer or numpy.RandomState, optional
The generator used to initialize the centers. If an integer is
given, it fixes the seed. Defaults to the global numpy random
number generator.
Attributes
----------
estimator_ : object
Best fitted model (copy of the `base_estimator` object).
n_trials_ : int
Number of random selection trials until one of the stop criteria is
met. It is always ``<= max_trials``.
inlier_mask_ : bool array of shape [n_samples]
Boolean mask of inliers classified as ``True``.
References
----------
.. [1] http://en.wikipedia.org/wiki/RANSAC
.. [2] http://www.cs.columbia.edu/~belhumeur/courses/compPhoto/ransac.pdf
.. [3] http://www.bmva.org/bmvc/2009/Papers/Paper355/Paper355.pdf
"""
def __init__(self, base_estimator=None, min_samples=None,
residual_threshold=None, is_data_valid=None,
is_model_valid=None, max_trials=100,
stop_n_inliers=np.inf, stop_score=np.inf,
stop_probability=0.99, residual_metric=None,
loss='absolute_loss', random_state=None):
self.base_estimator = base_estimator
self.min_samples = min_samples
self.residual_threshold = residual_threshold
self.is_data_valid = is_data_valid
self.is_model_valid = is_model_valid
self.max_trials = max_trials
self.stop_n_inliers = stop_n_inliers
self.stop_score = stop_score
self.stop_probability = stop_probability
self.residual_metric = residual_metric
self.random_state = random_state
self.loss = loss
def fit(self, X, y, sample_weight=None):
"""Fit estimator using RANSAC algorithm.
Parameters
----------
X : array-like or sparse matrix, shape [n_samples, n_features]
Training data.
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values.
sample_weight: array-like, shape = [n_samples]
Individual weights for each sample
raises error if sample_weight is passed and base_estimator
fit method does not support it.
Raises
------
ValueError
If no valid consensus set could be found. This occurs if
`is_data_valid` and `is_model_valid` return False for all
`max_trials` randomly chosen sub-samples.
"""
X = check_array(X, accept_sparse='csr')
y = check_array(y, ensure_2d=False)
check_consistent_length(X, y)
if self.base_estimator is not None:
base_estimator = clone(self.base_estimator)
else:
base_estimator = LinearRegression()
if self.min_samples is None:
# assume linear model by default
min_samples = X.shape[1] + 1
elif 0 < self.min_samples < 1:
min_samples = np.ceil(self.min_samples * X.shape[0])
elif self.min_samples >= 1:
if self.min_samples % 1 != 0:
raise ValueError("Absolute number of samples must be an "
"integer value.")
min_samples = self.min_samples
else:
raise ValueError("Value for `min_samples` must be scalar and "
"positive.")
if min_samples > X.shape[0]:
raise ValueError("`min_samples` may not be larger than number "
"of samples ``X.shape[0]``.")
if self.stop_probability < 0 or self.stop_probability > 1:
raise ValueError("`stop_probability` must be in range [0, 1].")
if self.residual_threshold is None:
# MAD (median absolute deviation)
residual_threshold = np.median(np.abs(y - np.median(y)))
else:
residual_threshold = self.residual_threshold
if self.residual_metric is not None:
warnings.warn(
"'residual_metric' will be removed in version 0.20. Use "
"'loss' instead.", DeprecationWarning)
if self.loss == "absolute_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: np.abs(y_true - y_pred)
else:
loss_function = lambda \
y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
elif self.loss == "squared_loss":
if y.ndim == 1:
loss_function = lambda y_true, y_pred: (y_true - y_pred) ** 2
else:
loss_function = lambda \
y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
elif callable(self.loss):
loss_function = self.loss
else:
raise ValueError(
"loss should be 'absolute_loss', 'squared_loss' or a callable."
"Got %s. " % self.loss)
random_state = check_random_state(self.random_state)
try: # Not all estimator accept a random_state
base_estimator.set_params(random_state=random_state)
except ValueError:
pass
estimator_fit_has_sample_weight = has_fit_parameter(base_estimator,
"sample_weight")
estimator_name = type(base_estimator).__name__
if (sample_weight is not None and not
estimator_fit_has_sample_weight):
raise ValueError("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
n_inliers_best = 0
score_best = np.inf
inlier_mask_best = None
X_inlier_best = None
y_inlier_best = None
# number of data samples
n_samples = X.shape[0]
sample_idxs = np.arange(n_samples)
n_samples, _ = X.shape
for self.n_trials_ in range(1, self.max_trials + 1):
# choose random sample set
subset_idxs = sample_without_replacement(n_samples, min_samples,
random_state=random_state)
X_subset = X[subset_idxs]
y_subset = y[subset_idxs]
# check if random sample set is valid
if (self.is_data_valid is not None
and not self.is_data_valid(X_subset, y_subset)):
continue
# fit model for current random sample set
if sample_weight is None:
base_estimator.fit(X_subset, y_subset)
else:
base_estimator.fit(X_subset, y_subset,
sample_weight=sample_weight[subset_idxs])
# check if estimated model is valid
if (self.is_model_valid is not None and not
self.is_model_valid(base_estimator, X_subset, y_subset)):
continue
# residuals of all data for current random sample model
y_pred = base_estimator.predict(X)
# XXX: Deprecation: Remove this if block in 0.20
if self.residual_metric is not None:
diff = y_pred - y
if diff.ndim == 1:
diff = diff.reshape(-1, 1)
residuals_subset = self.residual_metric(diff)
else:
residuals_subset = loss_function(y, y_pred)
# classify data into inliers and outliers
inlier_mask_subset = residuals_subset < residual_threshold
n_inliers_subset = np.sum(inlier_mask_subset)
# less inliers -> skip current random sample
if n_inliers_subset < n_inliers_best:
continue
if n_inliers_subset == 0:
raise ValueError("No inliers found, possible cause is "
"setting residual_threshold ({0}) too low.".format(
self.residual_threshold))
# extract inlier data set
inlier_idxs_subset = sample_idxs[inlier_mask_subset]
X_inlier_subset = X[inlier_idxs_subset]
y_inlier_subset = y[inlier_idxs_subset]
# score of inlier data set
score_subset = base_estimator.score(X_inlier_subset,
y_inlier_subset)
# same number of inliers but worse score -> skip current random
# sample
if (n_inliers_subset == n_inliers_best
and score_subset < score_best):
continue
# save current random sample as best sample
n_inliers_best = n_inliers_subset
score_best = score_subset
inlier_mask_best = inlier_mask_subset
X_inlier_best = X_inlier_subset
y_inlier_best = y_inlier_subset
# break if sufficient number of inliers or score is reached
if (n_inliers_best >= self.stop_n_inliers
or score_best >= self.stop_score
or self.n_trials_
>= _dynamic_max_trials(n_inliers_best, n_samples,
min_samples,
self.stop_probability)):
break
# if none of the iterations met the required criteria
if inlier_mask_best is None:
raise ValueError(
"RANSAC could not find valid consensus set, because"
" either the `residual_threshold` rejected all the samples or"
" `is_data_valid` and `is_model_valid` returned False for all"
" `max_trials` randomly ""chosen sub-samples. Consider "
"relaxing the ""constraints.")
# estimate final model using all inliers
base_estimator.fit(X_inlier_best, y_inlier_best)
self.estimator_ = base_estimator
self.inlier_mask_ = inlier_mask_best
return self
def predict(self, X):
"""Predict using the estimated model.
This is a wrapper for `estimator_.predict(X)`.
Parameters
----------
X : numpy array of shape [n_samples, n_features]
Returns
-------
y : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.predict(X)
def score(self, X, y):
"""Returns the score of the prediction.
This is a wrapper for `estimator_.score(X, y)`.
Parameters
----------
X : numpy array or sparse matrix of shape [n_samples, n_features]
Training data.
y : array, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
z : float
Score of the prediction.
"""
check_is_fitted(self, 'estimator_')
return self.estimator_.score(X, y)
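# A minimal, self-contained sketch of the estimator defined above on synthetic
# data with injected outliers. Illustrative only: the data, seed and printed
# attributes are arbitrary choices, and because this module uses relative
# imports the block is assumed to be run as ``python -m`` on an installed tree
# rather than as a plain script.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    X_demo = rng.uniform(-3, 3, size=(200, 1))
    y_demo = 2.5 * X_demo.ravel() + 1.0 + rng.normal(scale=0.3, size=200)
    y_demo[::10] += 20.0 * rng.normal(size=20)  # turn every 10th sample into a gross outlier
    reg = RANSACRegressor(random_state=0)
    reg.fit(X_demo, y_demo)
    print("estimated slope: %.2f, intercept: %.2f"
          % (reg.estimator_.coef_[0], reg.estimator_.intercept_))
    print("inlier fraction: %.2f" % reg.inlier_mask_.mean())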
| bsd-3-clause |
sonata-nfv/son-cli | setup.py | 5 | 3428 | # Copyright (c) 2015 SONATA-NFV, UBIWHERE
# ALL RIGHTS RESERVED.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Neither the name of the SONATA-NFV, UBIWHERE
# nor the names of its contributors may be used to endorse or promote
# products derived from this software without specific prior written
# permission.
#
# This work has been performed in the framework of the SONATA project,
# funded by the European Commission under Grant number 671517 through
# the Horizon 2020 and 5G-PPP programmes. The authors would like to
# acknowledge the contributions of their colleagues of the SONATA
# partner consortium (www.sonata-nfv.eu).
from setuptools import setup, find_packages
import codecs
import os.path as path
# buildout build system
# http://www.buildout.org/en/latest/docs/tutorial.html
# setup() documentation:
# http://python-packaging-user-guide.readthedocs.org/en/
# latest/distributing/#setup-py
cwd = path.dirname(__file__)
longdesc = codecs.open(path.join(cwd, 'README.md'), 'r', 'utf-8').read()
name = 'sonata-cli'
setup(
name=name,
license='Apache License, Version 2.0',
version='3.0',
url='https://github.com/sonata-nfv/son-cli',
author_email='sonata-dev@sonata-nfv.eu',
long_description=longdesc,
package_dir={'': 'src'},
packages=find_packages('src'), # dependency resolution
namespace_packages=['son', ],
include_package_data=True,
package_data= {
'son': ['schema/tests/son-schema/*', 'workspace/samples/*',
'monitor/docker_compose_files/*', 'monitor/grafana/*',
'monitor/prometheus/*', 'monitor/*.exp',
'validate/eventcfg.yml']
},
# in jenkins, the last package in the list is installed first
install_requires=['setuptools', 'pyaml', 'jsonschema', 'validators',
'requests>2.4.2', 'coloredlogs<=5.1.1', 'paramiko',
'termcolor', 'tabulate', 'networkx<=1.12', 'Flask',
'PyJWT>=1.4.2', 'docker==2.0.2', 'scipy', 'numpy',
'watchdog', 'Flask-Cors', 'flask_cache', 'redis',
'pycrypto', 'matplotlib', 'prometheus_client',
'requests-toolbelt==0.8.0'],
zip_safe=False,
entry_points={
'console_scripts': [
'son-workspace=son.workspace.workspace:main',
'son-package=son.package.package:main',
'son-monitor=son.monitor.monitor:main',
'son-profile=son.profile.profile:main',
'son-validate=son.validate.validate:main',
'son-validate-api=son.validate.api.api:main',
'son-access=son.access.access:main'
],
},
test_suite='son',
setup_requires=['pytest-runner'],
tests_require=['pytest']
)
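# A typical developer workflow for this package (generic setuptools/pip usage
# shown as an assumption, not project documentation):
#
#     pip install -e .           # editable install; registers the son-* console scripts
#     son-workspace --help       # any console_scripts entry point is then on PATH
#     python setup.py pytest     # run the test suite via pytest-runner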
| apache-2.0 |
wavelets/zipline | zipline/utils/test_utils.py | 5 | 3103 | from contextlib import contextmanager
from logbook import FileHandler
from zipline.finance.blotter import ORDER_STATUS
from six import itervalues
import pandas as pd
def to_utc(time_str):
return pd.Timestamp(time_str, tz='US/Eastern').tz_convert('UTC')
def setup_logger(test, path='test.log'):
test.log_handler = FileHandler(path)
test.log_handler.push_application()
def teardown_logger(test):
test.log_handler.pop_application()
test.log_handler.close()
def drain_zipline(test, zipline):
output = []
transaction_count = 0
msg_counter = 0
# start the simulation
for update in zipline:
msg_counter += 1
output.append(update)
if 'daily_perf' in update:
transaction_count += \
len(update['daily_perf']['transactions'])
return output, transaction_count
def assert_single_position(test, zipline):
output, transaction_count = drain_zipline(test, zipline)
if 'expected_transactions' in test.zipline_test_config:
test.assertEqual(
test.zipline_test_config['expected_transactions'],
transaction_count
)
else:
test.assertEqual(
test.zipline_test_config['order_count'],
transaction_count
)
# the final message is the risk report, the second to
# last is the final day's results. Positions is a list of
# dicts.
closing_positions = output[-2]['daily_perf']['positions']
# confirm that all orders were filled.
# iterate over the output updates, overwriting
# orders when they are updated. Then check the status on all.
orders_by_id = {}
for update in output:
if 'daily_perf' in update:
if 'orders' in update['daily_perf']:
for order in update['daily_perf']['orders']:
orders_by_id[order['id']] = order
for order in itervalues(orders_by_id):
test.assertEqual(
order['status'],
ORDER_STATUS.FILLED,
"")
test.assertEqual(
len(closing_positions),
1,
"Portfolio should have one position."
)
sid = test.zipline_test_config['sid']
test.assertEqual(
closing_positions[0]['sid'],
sid,
"Portfolio should have one position in " + str(sid)
)
return output, transaction_count
class ExceptionSource(object):
def __init__(self):
pass
def get_hash(self):
return "ExceptionSource"
def __iter__(self):
return self
def next(self):
5 / 0
def __next__(self):
5 / 0
class ExceptionTransform(object):
def __init__(self):
self.window_length = 1
pass
def get_hash(self):
return "ExceptionTransform"
def update(self, event):
assert False, "An assertion message"
@contextmanager
def nullctx():
"""
Null context manager. Useful for conditionally adding a contextmanager in
a single line, e.g.:
    with SomeContextManager() if some_expr else nullctx():
do_stuff()
"""
yield
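# A minimal, self-contained illustration of the conditional-context-manager
# pattern that nullctx is meant for (the decimal context used here is an
# arbitrary stand-in for "SomeContextManager"; running it assumes this module's
# imports resolve, i.e. zipline is installed):
if __name__ == '__main__':
    from decimal import localcontext
    for use_real_ctx in (False, True):
        with localcontext() if use_real_ctx else nullctx():
            print('inside the %s context' % ('decimal' if use_real_ctx else 'null'))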
| apache-2.0 |
shikhardb/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
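# A minimal usage sketch of two of the estimators re-exported above
# (illustrative only; the data shape and seed are arbitrary choices):
#
#     import numpy as np
#     from sklearn.covariance import EmpiricalCovariance, LedoitWolf
#     X = np.random.RandomState(0).randn(50, 5)
#     emp = EmpiricalCovariance().fit(X)
#     lw = LedoitWolf().fit(X)
#     print(emp.covariance_)                 # sample covariance
#     print(lw.covariance_, lw.shrinkage_)   # shrunk covariance and shrinkage amount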
| bsd-3-clause |
linebp/pandas | doc/make.py | 8 | 12640 | #!/usr/bin/env python
"""
Python script for building documentation.
To build the docs you must have all optional dependencies for pandas
installed. See the installation instructions for a list of these.
<del>Note: currently latex builds do not work because of table formats that are not
supported in the latex generation.</del>
2014-01-30: Latex has some issues but 'latex_forced' works ok for 0.13.0-400 or so
Usage
-----
python make.py clean
python make.py html
"""
from __future__ import print_function
import io
import glob # noqa
import os
import shutil
import sys
from contextlib import contextmanager
import sphinx # noqa
import argparse
import jinja2 # noqa
os.environ['PYTHONPATH'] = '..'
SPHINX_BUILD = 'sphinxbuild'
def _process_user(user):
if user is None or user is False:
user = ''
else:
user = user + '@'
return user
def upload_dev(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/ -essh'.format(user)):
raise SystemExit('Upload to Pydata Dev failed')
def upload_dev_pdf(user=None):
'push a copy to the pydata dev directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/dev/'.format(user)):
raise SystemExit('PDF upload to Pydata Dev failed')
def upload_stable(user=None):
'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/html; rsync -avz . {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/ -essh'.format(user)):
raise SystemExit('Upload to stable failed')
def upload_stable_pdf(user=None):
    'push a copy to the pydata stable directory'
user = _process_user(user)
if os.system('cd build/latex; scp pandas.pdf {0}pandas.pydata.org'
':/usr/share/nginx/pandas/pandas-docs/stable/'.format(user)):
raise SystemExit('PDF upload to stable failed')
def upload_prev(ver, doc_root='./', user=None):
'push a copy of older release to appropriate version directory'
user = _process_user(user)
local_dir = doc_root + 'build/html'
remote_dir = '/usr/share/nginx/pandas/pandas-docs/version/%s/' % ver
cmd = 'cd %s; rsync -avz . %spandas.pydata.org:%s -essh'
cmd = cmd % (local_dir, user, remote_dir)
print(cmd)
if os.system(cmd):
raise SystemExit(
'Upload to %s from %s failed' % (remote_dir, local_dir))
local_dir = doc_root + 'build/latex'
pdf_cmd = 'cd %s; scp pandas.pdf %spandas.pydata.org:%s'
pdf_cmd = pdf_cmd % (local_dir, user, remote_dir)
if os.system(pdf_cmd):
raise SystemExit('Upload PDF to %s from %s failed' % (ver, doc_root))
def build_pandas():
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
def build_prev(ver, user=None):
if os.system('git checkout v%s' % ver) != 1:
os.chdir('..')
os.system('python setup.py clean')
os.system('python setup.py build_ext --inplace')
os.chdir('doc')
os.system('python make.py clean')
os.system('python make.py html')
os.system('python make.py latex')
os.system('git checkout master')
def clean():
if os.path.exists('build'):
shutil.rmtree('build')
if os.path.exists('source/generated'):
shutil.rmtree('source/generated')
@contextmanager
def maybe_exclude_notebooks():
"""
Skip building the notebooks if pandoc is not installed.
This assumes that nbsphinx is installed.
"""
base = os.path.dirname(__file__)
notebooks = [os.path.join(base, 'source', nb)
for nb in ['style.ipynb']]
contents = {}
def _remove_notebooks():
for nb in notebooks:
with open(nb, 'rt') as f:
contents[nb] = f.read()
os.remove(nb)
# Skip notebook conversion if
# 1. nbconvert isn't installed, or
# 2. nbconvert is installed, but pandoc isn't
try:
import nbconvert
except ImportError:
print("Warning: nbconvert not installed. Skipping notebooks.")
_remove_notebooks()
else:
try:
nbconvert.utils.pandoc.get_pandoc_version()
except nbconvert.utils.pandoc.PandocMissing:
print("Warning: Pandoc is not installed. Skipping notebooks.")
_remove_notebooks()
yield
for nb, content in contents.items():
with open(nb, 'wt') as f:
f.write(content)
def html():
check_build()
with maybe_exclude_notebooks():
if os.system('sphinx-build -P -b html -d build/doctrees '
'source build/html'):
raise SystemExit("Building HTML failed.")
try:
# remove stale file
os.remove('build/html/pandas.zip')
except:
pass
def zip_html():
try:
print("\nZipping up HTML docs...")
# just in case the wonky build box doesn't have zip
# don't fail this.
os.system('cd build; rm -f html/pandas.zip; zip html/pandas.zip -r -q html/* ')
print("\n")
except:
pass
def latex():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Call the makefile produced by sphinx...
if os.system('make'):
print("Rendering LaTeX failed.")
print("You may still be able to get a usable PDF file by going into 'build/latex'")
print("and executing 'pdflatex pandas.tex' for the requisite number of passes.")
print("Or using the 'latex_forced' target")
raise SystemExit
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def latex_forced():
check_build()
if sys.platform != 'win32':
# LaTeX format.
if os.system('sphinx-build -j 2 -b latex -d build/doctrees '
'source build/latex'):
raise SystemExit("Building LaTeX failed.")
# Produce pdf.
os.chdir('build/latex')
# Manually call pdflatex, 3 passes should ensure latex fixes up
# all the required cross-references and such.
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
os.system('pdflatex -interaction=nonstopmode pandas.tex')
raise SystemExit("You should check the file 'build/latex/pandas.pdf' for problems.")
os.chdir('../..')
else:
print('latex build has not been tested on windows')
def check_build():
build_dirs = [
'build', 'build/doctrees', 'build/html',
'build/latex', 'build/plots', 'build/_static',
'build/_templates']
for d in build_dirs:
try:
os.mkdir(d)
except OSError:
pass
def all():
# clean()
html()
def auto_dev_build(debug=False):
msg = ''
try:
step = 'clean'
clean()
step = 'html'
html()
step = 'upload dev'
upload_dev()
if not debug:
sendmail(step)
step = 'latex'
latex()
step = 'upload pdf'
upload_dev_pdf()
if not debug:
sendmail(step)
except (Exception, SystemExit) as inst:
msg = str(inst) + '\n'
sendmail(step, '[ERROR] ' + msg)
def sendmail(step=None, err_msg=None):
from_name, to_name = _get_config()
if step is None:
step = ''
if err_msg is None or '[ERROR]' not in err_msg:
msgstr = 'Daily docs %s completed successfully' % step
subject = "DOC: %s successful" % step
else:
msgstr = err_msg
subject = "DOC: %s failed" % step
import smtplib
from email.MIMEText import MIMEText
msg = MIMEText(msgstr)
msg['Subject'] = subject
msg['From'] = from_name
msg['To'] = to_name
server_str, port, login, pwd = _get_credentials()
server = smtplib.SMTP(server_str, port)
server.ehlo()
server.starttls()
server.ehlo()
server.login(login, pwd)
try:
server.sendmail(from_name, to_name, msg.as_string())
finally:
server.close()
def _get_dir(subdir=None):
import getpass
USERNAME = getpass.getuser()
if sys.platform == 'darwin':
HOME = '/Users/%s' % USERNAME
else:
HOME = '/home/%s' % USERNAME
if subdir is None:
subdir = '/code/scripts/config'
conf_dir = '%s/%s' % (HOME, subdir)
return conf_dir
def _get_credentials():
tmp_dir = _get_dir()
cred = '%s/credentials' % tmp_dir
with open(cred, 'r') as fh:
server, port, un, domain = fh.read().split(',')
port = int(port)
login = un + '@' + domain + '.com'
import base64
with open('%s/cron_email_pwd' % tmp_dir, 'r') as fh:
pwd = base64.b64decode(fh.read())
return server, port, login, pwd
def _get_config():
tmp_dir = _get_dir()
with open('%s/addresses' % tmp_dir, 'r') as fh:
from_name, to_name = fh.read().split(',')
return from_name, to_name
funcd = {
'html': html,
'zip_html': zip_html,
'upload_dev': upload_dev,
'upload_stable': upload_stable,
'upload_dev_pdf': upload_dev_pdf,
'upload_stable_pdf': upload_stable_pdf,
'latex': latex,
'latex_forced': latex_forced,
'clean': clean,
'auto_dev': auto_dev_build,
'auto_debug': lambda: auto_dev_build(True),
'build_pandas': build_pandas,
'all': all,
}
small_docs = False
# current_dir = os.getcwd()
# os.chdir(os.path.dirname(os.path.join(current_dir, __file__)))
import argparse
argparser = argparse.ArgumentParser(description="""
pandas documentation builder
""".strip())
# argparser.add_argument('-arg_name', '--arg_name',
# metavar='label for arg help',
# type=str|etc,
# nargs='N|*|?|+|argparse.REMAINDER',
# required=False,
# #choices='abc',
# help='help string',
# action='store|store_true')
# args = argparser.parse_args()
#print args.accumulate(args.integers)
def generate_index(api=True, single=False, **kwds):
from jinja2 import Template
with open("source/index.rst.template") as f:
t = Template(f.read())
with open("source/index.rst","w") as f:
f.write(t.render(api=api,single=single,**kwds))
import argparse
argparser = argparse.ArgumentParser(description="pandas documentation builder",
epilog="Targets : %s" % funcd.keys())
argparser.add_argument('--no-api',
default=False,
                       help='Omit api and autosummary',
action='store_true')
argparser.add_argument('--single',
metavar='FILENAME',
type=str,
default=False,
help='filename of section to compile, e.g. "indexing"')
argparser.add_argument('--user',
type=str,
default=False,
help='Username to connect to the pydata server')
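# Example invocations (targets come from `funcd`, options from the parser above;
# version numbers and user names are placeholders):
#
#   python make.py html                        # build the HTML docs
#   python make.py clean                       # remove build/ and generated sources
#   python make.py --no-api html               # skip the api/autosummary pages
#   python make.py --single indexing html      # build docs for just source/indexing.rst
#   python make.py upload_previous 0.13.0 --user <name>   # push docs for an older release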
def main():
args, unknown = argparser.parse_known_args()
sys.argv = [sys.argv[0]] + unknown
if args.single:
args.single = os.path.basename(args.single).split(".rst")[0]
if 'clean' in unknown:
args.single=False
generate_index(api=not args.no_api and not args.single, single=args.single)
if len(sys.argv) > 2:
ftype = sys.argv[1]
ver = sys.argv[2]
if ftype == 'build_previous':
build_prev(ver, user=args.user)
if ftype == 'upload_previous':
upload_prev(ver, user=args.user)
elif len(sys.argv) == 2:
for arg in sys.argv[1:]:
func = funcd.get(arg)
if func is None:
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
arg, list(funcd.keys())))
if args.user:
func(user=args.user)
else:
func()
else:
small_docs = False
all()
# os.chdir(current_dir)
if __name__ == '__main__':
import sys
sys.exit(main())
| bsd-3-clause |
lip6-mptcp/ns3mptcp | src/flow-monitor/examples/wifi-olsr-flowmon.py | 108 | 7439 | # -*- Mode: Python; -*-
# Copyright (c) 2009 INESC Porto
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation;
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# Authors: Gustavo Carneiro <gjc@inescporto.pt>
import sys
import ns.applications
import ns.core
import ns.flow_monitor
import ns.internet
import ns.mobility
import ns.network
import ns.olsr
import ns.wifi
try:
import ns.visualizer
except ImportError:
pass
DISTANCE = 100 # (m)
NUM_NODES_SIDE = 3
def main(argv):
cmd = ns.core.CommandLine()
cmd.NumNodesSide = None
cmd.AddValue("NumNodesSide", "Grid side number of nodes (total number of nodes will be this number squared)")
cmd.Results = None
cmd.AddValue("Results", "Write XML results to file")
cmd.Plot = None
cmd.AddValue("Plot", "Plot the results using the matplotlib python module")
cmd.Parse(argv)
wifi = ns.wifi.WifiHelper.Default()
wifiMac = ns.wifi.NqosWifiMacHelper.Default()
wifiPhy = ns.wifi.YansWifiPhyHelper.Default()
wifiChannel = ns.wifi.YansWifiChannelHelper.Default()
wifiPhy.SetChannel(wifiChannel.Create())
ssid = ns.wifi.Ssid("wifi-default")
wifi.SetRemoteStationManager("ns3::ArfWifiManager")
wifiMac.SetType ("ns3::AdhocWifiMac",
"Ssid", ns.wifi.SsidValue(ssid))
internet = ns.internet.InternetStackHelper()
list_routing = ns.internet.Ipv4ListRoutingHelper()
olsr_routing = ns.olsr.OlsrHelper()
static_routing = ns.internet.Ipv4StaticRoutingHelper()
list_routing.Add(static_routing, 0)
list_routing.Add(olsr_routing, 100)
internet.SetRoutingHelper(list_routing)
ipv4Addresses = ns.internet.Ipv4AddressHelper()
ipv4Addresses.SetBase(ns.network.Ipv4Address("10.0.0.0"), ns.network.Ipv4Mask("255.255.255.0"))
port = 9 # Discard port(RFC 863)
onOffHelper = ns.applications.OnOffHelper("ns3::UdpSocketFactory",
ns.network.Address(ns.network.InetSocketAddress(ns.network.Ipv4Address("10.0.0.1"), port)))
onOffHelper.SetAttribute("DataRate", ns.network.DataRateValue(ns.network.DataRate("100kbps")))
onOffHelper.SetAttribute("OnTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=1]"))
onOffHelper.SetAttribute("OffTime", ns.core.StringValue ("ns3::ConstantRandomVariable[Constant=0]"))
addresses = []
nodes = []
if cmd.NumNodesSide is None:
num_nodes_side = NUM_NODES_SIDE
else:
num_nodes_side = int(cmd.NumNodesSide)
for xi in range(num_nodes_side):
for yi in range(num_nodes_side):
node = ns.network.Node()
nodes.append(node)
internet.Install(ns.network.NodeContainer(node))
mobility = ns.mobility.ConstantPositionMobilityModel()
mobility.SetPosition(ns.core.Vector(xi*DISTANCE, yi*DISTANCE, 0))
node.AggregateObject(mobility)
devices = wifi.Install(wifiPhy, wifiMac, node)
ipv4_interfaces = ipv4Addresses.Assign(devices)
addresses.append(ipv4_interfaces.GetAddress(0))
for i, node in enumerate(nodes):
destaddr = addresses[(len(addresses) - 1 - i) % len(addresses)]
#print i, destaddr
onOffHelper.SetAttribute("Remote", ns.network.AddressValue(ns.network.InetSocketAddress(destaddr, port)))
app = onOffHelper.Install(ns.network.NodeContainer(node))
urv = ns.core.UniformRandomVariable()
app.Start(ns.core.Seconds(urv.GetValue(20, 30)))
#internet.EnablePcapAll("wifi-olsr")
flowmon_helper = ns.flow_monitor.FlowMonitorHelper()
#flowmon_helper.SetMonitorAttribute("StartTime", ns.core.TimeValue(ns.core.Seconds(31)))
monitor = flowmon_helper.InstallAll()
monitor = flowmon_helper.GetMonitor()
monitor.SetAttribute("DelayBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("JitterBinWidth", ns.core.DoubleValue(0.001))
monitor.SetAttribute("PacketSizeBinWidth", ns.core.DoubleValue(20))
ns.core.Simulator.Stop(ns.core.Seconds(44.0))
ns.core.Simulator.Run()
def print_stats(os, st):
print >> os, " Tx Bytes: ", st.txBytes
print >> os, " Rx Bytes: ", st.rxBytes
print >> os, " Tx Packets: ", st.txPackets
print >> os, " Rx Packets: ", st.rxPackets
print >> os, " Lost Packets: ", st.lostPackets
if st.rxPackets > 0:
print >> os, " Mean{Delay}: ", (st.delaySum.GetSeconds() / st.rxPackets)
print >> os, " Mean{Jitter}: ", (st.jitterSum.GetSeconds() / (st.rxPackets-1))
print >> os, " Mean{Hop Count}: ", float(st.timesForwarded) / st.rxPackets + 1
if 0:
print >> os, "Delay Histogram"
for i in range(st.delayHistogram.GetNBins () ):
print >> os, " ",i,"(", st.delayHistogram.GetBinStart (i), "-", \
st.delayHistogram.GetBinEnd (i), "): ", st.delayHistogram.GetBinCount (i)
print >> os, "Jitter Histogram"
for i in range(st.jitterHistogram.GetNBins () ):
print >> os, " ",i,"(", st.jitterHistogram.GetBinStart (i), "-", \
st.jitterHistogram.GetBinEnd (i), "): ", st.jitterHistogram.GetBinCount (i)
print >> os, "PacketSize Histogram"
for i in range(st.packetSizeHistogram.GetNBins () ):
print >> os, " ",i,"(", st.packetSizeHistogram.GetBinStart (i), "-", \
st.packetSizeHistogram.GetBinEnd (i), "): ", st.packetSizeHistogram.GetBinCount (i)
for reason, drops in enumerate(st.packetsDropped):
print " Packets dropped by reason %i: %i" % (reason, drops)
#for reason, drops in enumerate(st.bytesDropped):
# print "Bytes dropped by reason %i: %i" % (reason, drops)
monitor.CheckForLostPackets()
classifier = flowmon_helper.GetClassifier()
if cmd.Results is None:
for flow_id, flow_stats in monitor.GetFlowStats():
t = classifier.FindFlow(flow_id)
proto = {6: 'TCP', 17: 'UDP'} [t.protocol]
print "FlowID: %i (%s %s/%s --> %s/%i)" % \
(flow_id, proto, t.sourceAddress, t.sourcePort, t.destinationAddress, t.destinationPort)
print_stats(sys.stdout, flow_stats)
else:
print monitor.SerializeToXmlFile(cmd.Results, True, True)
if cmd.Plot is not None:
import pylab
delays = []
for flow_id, flow_stats in monitor.GetFlowStats():
tupl = classifier.FindFlow(flow_id)
if tupl.protocol == 17 and tupl.sourcePort == 698:
continue
delays.append(flow_stats.delaySum.GetSeconds() / flow_stats.rxPackets)
pylab.hist(delays, 20)
pylab.xlabel("Delay (s)")
pylab.ylabel("Number of Flows")
pylab.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
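# Example invocation (the waf wrapper is the usual way to run ns-3 Python
# examples; treat the exact command and option values as assumptions):
#
#   ./waf --pyrun "src/flow-monitor/examples/wifi-olsr-flowmon.py --NumNodesSide=4 --Results=olsr.flowmon"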
| gpl-2.0 |
datalyze-solutions/pandas-qt | pandasqt/views/EditDialogs.py | 4 | 8445 | import re
from pandasqt.compat import QtCore, QtGui, Qt, Slot, Signal
from pandasqt.models.SupportedDtypes import SupportedDtypes
import numpy
from pandas import Timestamp
from pandas.tslib import NaTType
class DefaultValueValidator(QtGui.QValidator):
def __init__(self, parent=None):
super(DefaultValueValidator, self).__init__(parent)
self.dtype = None
self.intPattern = re.compile('[-+]?\d+')
self.uintPattern = re.compile('\d+')
self.floatPattern = re.compile('[+-]? *(?:\d+(?:\.\d*)?|\.\d+)')
self.boolPattern = re.compile('(1|t|0|f){1}$')
@Slot(numpy.dtype)
def validateType(self, dtype):
self.dtype = dtype
def fixup(self, string):
pass
def validate(self, s, pos):
if not s:
            # s is empty
return (QtGui.QValidator.Acceptable, s, pos)
if self.dtype in SupportedDtypes.strTypes():
return (QtGui.QValidator.Acceptable, s, pos)
elif self.dtype in SupportedDtypes.boolTypes():
match = re.match(self.boolPattern, s)
if match:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
elif self.dtype in SupportedDtypes.datetimeTypes():
try:
ts = Timestamp(s)
except ValueError, e:
return (QtGui.QValidator.Intermediate, s, pos)
return (QtGui.QValidator.Acceptable, s, pos)
else:
dtypeInfo = None
if self.dtype in SupportedDtypes.intTypes():
match = re.search(self.intPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.uintTypes():
match = re.search(self.uintPattern, s)
if match:
try:
value = int(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.iinfo(self.dtype)
elif self.dtype in SupportedDtypes.floatTypes():
match = re.search(self.floatPattern, s)
print match
if match:
try:
value = float(match.string)
except ValueError, e:
return (QtGui.QValidator.Invalid, s, pos)
dtypeInfo = numpy.finfo(self.dtype)
if dtypeInfo is not None:
if value >= dtypeInfo.min and value <= dtypeInfo.max:
return (QtGui.QValidator.Acceptable, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
else:
return (QtGui.QValidator.Invalid, s, pos)
return (QtGui.QValidator.Invalid, s, pos)
class AddAttributesDialog(QtGui.QDialog):
accepted = Signal(str, object, object)
def __init__(self, parent=None):
super(AddAttributesDialog, self).__init__(parent)
self.initUi()
def initUi(self):
self.setModal(True)
self.resize(303, 168)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Expanding, QtGui.QSizePolicy.Fixed)
self.setSizePolicy(sizePolicy)
self.verticalLayout = QtGui.QVBoxLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Add a new attribute column'), self)
self.gridLayout = QtGui.QGridLayout()
self.columnNameLineEdit = QtGui.QLineEdit(self)
self.columnNameLabel = QtGui.QLabel(self.tr('Name'), self)
self.dataTypeComboBox = QtGui.QComboBox(self)
self.dataTypeComboBox.addItems(SupportedDtypes.names())
self.columnTypeLabel = QtGui.QLabel(self.tr('Type'), self)
self.defaultValueLineEdit = QtGui.QLineEdit(self)
self.lineEditValidator = DefaultValueValidator(self)
self.defaultValueLineEdit.setValidator(self.lineEditValidator)
        self.defaultValueLabel = QtGui.QLabel(self.tr('Initial Value(s)'), self)
self.gridLayout.addWidget(self.columnNameLabel, 0, 0, 1, 1)
self.gridLayout.addWidget(self.columnNameLineEdit, 0, 1, 1, 1)
self.gridLayout.addWidget(self.columnTypeLabel, 1, 0, 1, 1)
self.gridLayout.addWidget(self.dataTypeComboBox, 1, 1, 1, 1)
self.gridLayout.addWidget(self.defaultValueLabel, 2, 0, 1, 1)
self.gridLayout.addWidget(self.defaultValueLineEdit, 2, 1, 1, 1)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.verticalLayout.addWidget(self.dialogHeading)
self.verticalLayout.addLayout(self.gridLayout)
self.verticalLayout.addWidget(self.buttonBox)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
self.dataTypeComboBox.currentIndexChanged.connect(self.updateValidatorDtype)
self.updateValidatorDtype(self.dataTypeComboBox.currentIndex())
def accept(self):
super(AddAttributesDialog, self).accept()
newColumn = self.columnNameLineEdit.text()
dtype = SupportedDtypes.dtype(self.dataTypeComboBox.currentText())
defaultValue = self.defaultValueLineEdit.text()
try:
if dtype in SupportedDtypes.intTypes() + SupportedDtypes.uintTypes():
defaultValue = int(defaultValue)
elif dtype in SupportedDtypes.floatTypes():
defaultValue = float(defaultValue)
elif dtype in SupportedDtypes.boolTypes():
defaultValue = defaultValue.lower() in ['t', '1']
elif dtype in SupportedDtypes.datetimeTypes():
defaultValue = Timestamp(defaultValue)
if isinstance(defaultValue, NaTType):
defaultValue = Timestamp('')
else:
defaultValue = dtype.type()
except ValueError, e:
defaultValue = dtype.type()
self.accepted.emit(newColumn, dtype, defaultValue)
@Slot(int)
def updateValidatorDtype(self, index):
(dtype, name) = SupportedDtypes.tupleAt(index)
self.defaultValueLineEdit.clear()
self.lineEditValidator.validateType(dtype)
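# A minimal usage sketch for AddAttributesDialog (illustrative only; it assumes
# a running Qt event loop and that QApplication is exposed through the same
# QtGui compat module imported above):
#
#     app = QtGui.QApplication([])
#     dialog = AddAttributesDialog()
#     def on_accept(name, dtype, default):
#         print name, dtype, default
#     dialog.accepted.connect(on_accept)
#     dialog.show()
#     app.exec_()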
class RemoveAttributesDialog(QtGui.QDialog):
accepted = Signal(list)
def __init__(self, columns, parent=None):
super(RemoveAttributesDialog, self).__init__(parent)
self.columns = columns
self.initUi()
def initUi(self):
self.setWindowTitle(self.tr('Remove Attributes'))
self.setModal(True)
self.resize(366, 274)
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Fixed, QtGui.QSizePolicy.Expanding)
self.setSizePolicy(sizePolicy)
self.gridLayout = QtGui.QGridLayout(self)
self.dialogHeading = QtGui.QLabel(self.tr('Select the attribute column(s) which shall be removed'), self)
self.listView = QtGui.QListView(self)
model = QtGui.QStandardItemModel()
for column in self.columns:
item = QtGui.QStandardItem(column)
model.appendRow(item)
self.listView.setModel(model)
self.listView.setSelectionMode(QtGui.QListView.MultiSelection)
self.buttonBox = QtGui.QDialogButtonBox(self)
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel | QtGui.QDialogButtonBox.Ok)
self.gridLayout.addWidget(self.dialogHeading, 0, 0, 1, 1)
self.gridLayout.addWidget(self.listView, 1, 0, 1, 1)
self.gridLayout.addWidget(self.buttonBox, 2, 0, 1, 1)
self.buttonBox.accepted.connect(self.accept)
self.buttonBox.rejected.connect(self.reject)
def accept(self):
selection = self.listView.selectedIndexes()
names = []
for index in selection:
position = index.row()
names.append((position, index.data(QtCore.Qt.DisplayRole)))
super(RemoveAttributesDialog, self).accept()
self.accepted.emit(names) | mit |
theakholic/ThinkStats2 | code/timeseries.py | 66 | 18035 | """This file contains code for use with "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import pandas
import numpy as np
import statsmodels.formula.api as smf
import statsmodels.tsa.stattools as smtsa
import matplotlib.pyplot as pyplot
import thinkplot
import thinkstats2
FORMATS = ['png']
def ReadData():
"""Reads data about cannabis transactions.
http://zmjones.com/static/data/mj-clean.csv
returns: DataFrame
"""
transactions = pandas.read_csv('mj-clean.csv', parse_dates=[5])
return transactions
def tmean(series):
"""Computes a trimmed mean.
series: Series
returns: float
"""
t = series.values
n = len(t)
if n <= 3:
return t.mean()
    trim = max(1, n // 10)
return np.mean(sorted(t)[trim:n-trim])
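# Worked example for tmean (illustrative): for the 11 values [0, 1, ..., 9, 1000],
# n=11 and trim = max(1, 11 // 10) = 1, so the smallest and largest values
# (0 and 1000) are dropped and the result is mean([1, ..., 9]) = 5.0, whereas
# the plain mean would be 95.0.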
def GroupByDay(transactions, func=np.mean):
"""Groups transactions by day and compute the daily mean ppg.
transactions: DataFrame of transactions
returns: DataFrame of daily prices
"""
groups = transactions[['date', 'ppg']].groupby('date')
daily = groups.aggregate(func)
daily['date'] = daily.index
start = daily.date[0]
one_year = np.timedelta64(1, 'Y')
daily['years'] = (daily.date - start) / one_year
return daily
def GroupByQualityAndDay(transactions):
"""Divides transactions by quality and computes mean daily price.
transaction: DataFrame of transactions
returns: map from quality to time series of ppg
"""
groups = transactions.groupby('quality')
dailies = {}
for name, group in groups:
dailies[name] = GroupByDay(group)
return dailies
def PlotDailies(dailies):
"""Makes a plot with daily prices for different qualities.
dailies: map from name to DataFrame
"""
thinkplot.PrePlot(rows=3)
for i, (name, daily) in enumerate(dailies.items()):
thinkplot.SubPlot(i+1)
title = 'price per gram ($)' if i == 0 else ''
thinkplot.Config(ylim=[0, 20], title=title)
thinkplot.Scatter(daily.ppg, s=10, label=name)
if i == 2:
pyplot.xticks(rotation=30)
else:
thinkplot.Config(xticks=[])
thinkplot.Save(root='timeseries1',
formats=FORMATS)
def RunLinearModel(daily):
"""Runs a linear model of prices versus years.
daily: DataFrame of daily prices
returns: model, results
"""
model = smf.ols('ppg ~ years', data=daily)
results = model.fit()
return model, results
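# A minimal sketch of how RunLinearModel is typically used with the groupings
# above (illustrative; it assumes mj-clean.csv is available as in ReadData and
# that 'high' is one of the quality keys in the data):
#
#     transactions = ReadData()
#     dailies = GroupByQualityAndDay(transactions)
#     model, results = RunLinearModel(dailies['high'])
#     print(results.params)     # intercept and slope (price change per year)
#     print(results.rsquared)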
def PlotFittedValues(model, results, label=''):
"""Plots original data and fitted values.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
values = model.endog
thinkplot.Scatter(years, values, s=15, label=label)
thinkplot.Plot(years, results.fittedvalues, label='model')
def PlotResiduals(model, results):
"""Plots the residuals of a model.
model: StatsModel model object
results: StatsModel results object
"""
years = model.exog[:, 1]
thinkplot.Plot(years, results.resid, linewidth=0.5, alpha=0.5)
def PlotResidualPercentiles(model, results, index=1, num_bins=20):
"""Plots percentiles of the residuals.
model: StatsModel model object
results: StatsModel results object
index: which exogenous variable to use
num_bins: how many bins to divide the x-axis into
"""
exog = model.exog[:, index]
resid = results.resid.values
df = pandas.DataFrame(dict(exog=exog, resid=resid))
bins = np.linspace(np.min(exog), np.max(exog), num_bins)
indices = np.digitize(exog, bins)
groups = df.groupby(indices)
means = [group.exog.mean() for _, group in groups][1:-1]
cdfs = [thinkstats2.Cdf(group.resid) for _, group in groups][1:-1]
thinkplot.PrePlot(3)
for percent in [75, 50, 25]:
percentiles = [cdf.Percentile(percent) for cdf in cdfs]
label = '%dth' % percent
thinkplot.Plot(means, percentiles, label=label)
def SimulateResults(daily, iters=101, func=RunLinearModel):
"""Run simulations based on resampling residuals.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
_, results = func(daily)
fake = daily.copy()
result_seq = []
for _ in range(iters):
fake.ppg = results.fittedvalues + thinkstats2.Resample(results.resid)
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def SimulateIntervals(daily, iters=101, func=RunLinearModel):
"""Run simulations based on different subsets of the data.
daily: DataFrame of daily prices
iters: number of simulations
func: function that fits a model to the data
returns: list of result objects
"""
result_seq = []
starts = np.linspace(0, len(daily), iters).astype(int)
for start in starts[:-2]:
subset = daily[start:]
_, results = func(subset)
fake = subset.copy()
for _ in range(iters):
fake.ppg = (results.fittedvalues +
thinkstats2.Resample(results.resid))
_, fake_results = func(fake)
result_seq.append(fake_results)
return result_seq
def GeneratePredictions(result_seq, years, add_resid=False):
"""Generates an array of predicted values from a list of model results.
When add_resid is False, predictions represent sampling error only.
When add_resid is True, they also include residual error (which is
more relevant to prediction).
result_seq: list of model results
years: sequence of times (in years) to make predictions for
add_resid: boolean, whether to add in resampled residuals
returns: sequence of predictions
"""
n = len(years)
d = dict(Intercept=np.ones(n), years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict_seq = []
for fake_results in result_seq:
predict = fake_results.predict(predict_df)
if add_resid:
predict += thinkstats2.Resample(fake_results.resid, n)
predict_seq.append(predict)
return predict_seq
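# A minimal sketch combining SimulateResults and GeneratePredictions to get an
# approximate 90% prediction band; the helper name is illustrative only.
def _example_prediction_band(daily, years):
    result_seq = SimulateResults(daily, iters=101)
    predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
    low, high = thinkstats2.PercentileRows(predict_seq, [5, 95])
    return low, high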
def GenerateSimplePrediction(results, years):
"""Generates a simple prediction.
results: results object
years: sequence of times (in years) to make predictions for
returns: sequence of predicted values
"""
n = len(years)
inter = np.ones(n)
d = dict(Intercept=inter, years=years, years2=years**2)
predict_df = pandas.DataFrame(d)
predict = results.predict(predict_df)
return predict
def PlotPredictions(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateResults(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.3, color='gray')
predict_seq = GeneratePredictions(result_seq, years, add_resid=False)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.5, color='gray')
def PlotIntervals(daily, years, iters=101, percent=90, func=RunLinearModel):
"""Plots predictions based on different intervals.
daily: DataFrame of daily prices
years: sequence of times (in years) to make predictions for
iters: number of simulations
percent: what percentile range to show
func: function that fits a model to the data
"""
result_seq = SimulateIntervals(daily, iters=iters, func=func)
p = (100 - percent) / 2
percents = p, 100-p
predict_seq = GeneratePredictions(result_seq, years, add_resid=True)
low, high = thinkstats2.PercentileRows(predict_seq, percents)
thinkplot.FillBetween(years, low, high, alpha=0.2, color='gray')
def Correlate(dailies):
"""Compute the correlation matrix between prices for difference qualities.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
df[name] = daily.ppg
return df.corr()
def CorrelateResid(dailies):
"""Compute the correlation matrix between residuals.
dailies: map from quality to time series of ppg
returns: correlation matrix
"""
df = pandas.DataFrame()
for name, daily in dailies.items():
_, results = RunLinearModel(daily)
df[name] = results.resid
return df.corr()
def TestCorrelateResid(dailies, iters=101):
"""Tests observed correlations.
dailies: map from quality to time series of ppg
iters: number of simulations
"""
t = []
names = ['high', 'medium', 'low']
for name in names:
daily = dailies[name]
t.append(SimulateResults(daily, iters=iters))
corr = CorrelateResid(dailies)
arrays = []
for result_seq in zip(*t):
df = pandas.DataFrame()
for name, results in zip(names, result_seq):
df[name] = results.resid
opp_sign = corr * df.corr() < 0
arrays.append((opp_sign.astype(int)))
print(np.sum(arrays))
def RunModels(dailies):
"""Runs linear regression for each group in dailies.
dailies: map from group name to DataFrame
"""
rows = []
for daily in dailies.values():
_, results = RunLinearModel(daily)
intercept, slope = results.params
p1, p2 = results.pvalues
r2 = results.rsquared
s = r'%0.3f (%0.2g) & %0.3f (%0.2g) & %0.3f \\'
row = s % (intercept, p1, slope, p2, r2)
rows.append(row)
# print results in a LaTeX table
print(r'\begin{tabular}{|c|c|c|}')
print(r'\hline')
print(r'intercept & slope & $R^2$ \\ \hline')
for row in rows:
print(row)
print(r'\hline')
print(r'\end{tabular}')
def FillMissing(daily, span=30):
"""Fills missing values with an exponentially weighted moving average.
Resulting DataFrame has new columns 'ewma' and 'resid'.
daily: DataFrame of daily prices
span: window size (sort of) passed to ewma
returns: new DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
ewma = pandas.ewma(reindexed.ppg, span=span)
resid = (reindexed.ppg - ewma).dropna()
fake_data = ewma + thinkstats2.Resample(resid, len(reindexed))
reindexed.ppg.fillna(fake_data, inplace=True)
reindexed['ewma'] = ewma
reindexed['resid'] = reindexed.ppg - ewma
return reindexed
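# A minimal sketch of FillMissing: fill gaps in one quality's daily series and
# inspect the added 'ewma' and 'resid' columns. The helper name is illustrative.
def _example_fill_missing(daily):
    filled = FillMissing(daily, span=30)
    return filled[['ppg', 'ewma', 'resid']].head()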
def AddWeeklySeasonality(daily):
"""Adds a weekly pattern.
daily: DataFrame of daily prices
returns: new DataFrame of daily prices
"""
frisat = (daily.index.dayofweek==4) | (daily.index.dayofweek==5)
fake = daily.copy()
fake.ppg[frisat] += np.random.uniform(0, 2, frisat.sum())
return fake
def PrintSerialCorrelations(dailies):
"""Prints a table of correlations with different lags.
dailies: map from category name to DataFrame of daily prices
"""
filled_dailies = {}
for name, daily in dailies.items():
filled_dailies[name] = FillMissing(daily, span=30)
# print serial correlations for raw price data
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.ppg, lag=1)
print(name, corr)
rows = []
for lag in [1, 7, 30, 365]:
row = [str(lag)]
for name, filled in filled_dailies.items():
corr = thinkstats2.SerialCorr(filled.resid, lag)
row.append('%.2g' % corr)
rows.append(row)
print(r'\begin{tabular}{|c|c|c|c|}')
print(r'\hline')
print(r'lag & high & medium & low \\ \hline')
for row in rows:
print(' & '.join(row) + r' \\')
print(r'\hline')
print(r'\end{tabular}')
filled = filled_dailies['high']
acf = smtsa.acf(filled.resid, nlags=365, unbiased=True)
print('%0.3f, %0.3f, %0.3f, %0.3f, %0.3f' %
(acf[0], acf[1], acf[7], acf[30], acf[365]))
def SimulateAutocorrelation(daily, iters=1001, nlags=40):
"""Resample residuals, compute autocorrelation, and plot percentiles.
daily: DataFrame
iters: number of simulations to run
nlags: maximum lags to compute autocorrelation
"""
# run simulations
t = []
for _ in range(iters):
filled = FillMissing(daily, span=30)
resid = thinkstats2.Resample(filled.resid)
acf = smtsa.acf(resid, nlags=nlags, unbiased=True)[1:]
t.append(np.abs(acf))
high = thinkstats2.PercentileRows(t, [97.5])[0]
low = -high
lags = range(1, nlags+1)
thinkplot.FillBetween(lags, low, high, alpha=0.2, color='gray')
def PlotAutoCorrelation(dailies, nlags=40, add_weekly=False):
"""Plots autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
nlags: number of lags to compute
add_weekly: boolean, whether to add a simulated weekly pattern
"""
thinkplot.PrePlot(3)
daily = dailies['high']
SimulateAutocorrelation(daily)
for name, daily in dailies.items():
if add_weekly:
daily = AddWeeklySeasonality(daily)
filled = FillMissing(daily, span=30)
acf = smtsa.acf(filled.resid, nlags=nlags, unbiased=True)
lags = np.arange(len(acf))
thinkplot.Plot(lags[1:], acf[1:], label=name)
def MakeAcfPlot(dailies):
"""Makes a figure showing autocorrelation functions.
dailies: map from category name to DataFrame of daily prices
"""
axis = [0, 41, -0.2, 0.2]
thinkplot.PrePlot(cols=2)
PlotAutoCorrelation(dailies, add_weekly=False)
thinkplot.Config(axis=axis,
loc='lower right',
ylabel='correlation',
xlabel='lag (day)')
thinkplot.SubPlot(2)
PlotAutoCorrelation(dailies, add_weekly=True)
thinkplot.Save(root='timeseries9',
axis=axis,
loc='lower right',
xlabel='lag (days)',
formats=FORMATS)
def PlotRollingMean(daily, name):
"""Plots rolling mean and EWMA.
daily: DataFrame of daily prices
"""
dates = pandas.date_range(daily.index.min(), daily.index.max())
reindexed = daily.reindex(dates)
thinkplot.PrePlot(cols=2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
roll_mean = pandas.rolling_mean(reindexed.ppg, 30)
thinkplot.Plot(roll_mean, label='rolling mean')
pyplot.xticks(rotation=30)
thinkplot.Config(ylabel='price per gram ($)')
thinkplot.SubPlot(2)
thinkplot.Scatter(reindexed.ppg, s=15, alpha=0.1, label=name)
ewma = pandas.ewma(reindexed.ppg, span=30)
thinkplot.Plot(ewma, label='EWMA')
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries10',
formats=FORMATS)
def PlotFilled(daily, name):
"""Plots the EWMA and filled data.
daily: DataFrame of daily prices
"""
filled = FillMissing(daily, span=30)
thinkplot.Scatter(filled.ppg, s=15, alpha=0.3, label=name)
thinkplot.Plot(filled.ewma, label='EWMA', alpha=0.4)
pyplot.xticks(rotation=30)
thinkplot.Save(root='timeseries8',
ylabel='price per gram ($)',
formats=FORMATS)
def PlotLinearModel(daily, name):
"""Plots a linear fit to a sequence of prices, and the residuals.
daily: DataFrame of daily prices
name: string
"""
model, results = RunLinearModel(daily)
PlotFittedValues(model, results, label=name)
thinkplot.Save(root='timeseries2',
title='fitted values',
xlabel='years',
xlim=[-0.1, 3.8],
ylabel='price per gram ($)',
formats=FORMATS)
PlotResidualPercentiles(model, results)
thinkplot.Save(root='timeseries3',
title='residuals',
xlabel='years',
ylabel='price per gram ($)',
formats=FORMATS)
#years = np.linspace(0, 5, 101)
#predict = GenerateSimplePrediction(results, years)
def main(name):
thinkstats2.RandomSeed(18)
transactions = ReadData()
dailies = GroupByQualityAndDay(transactions)
PlotDailies(dailies)
RunModels(dailies)
PrintSerialCorrelations(dailies)
MakeAcfPlot(dailies)
name = 'high'
daily = dailies[name]
PlotLinearModel(daily, name)
PlotRollingMean(daily, name)
PlotFilled(daily, name)
years = np.linspace(0, 5, 101)
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries4',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
name = 'medium'
daily = dailies[name]
thinkplot.Scatter(daily.years, daily.ppg, alpha=0.1, label=name)
PlotIntervals(daily, years)
PlotPredictions(daily, years)
xlim = years[0]-0.1, years[-1]+0.1
thinkplot.Save(root='timeseries5',
title='predictions',
xlabel='years',
xlim=xlim,
ylabel='price per gram ($)',
formats=FORMATS)
if __name__ == '__main__':
import sys
main(*sys.argv)
| gpl-3.0 |
yanlend/scikit-learn | examples/datasets/plot_iris_dataset.py | 283 | 1928 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| bsd-3-clause |
simon-pepin/scikit-learn | sklearn/semi_supervised/label_propagation.py | 128 | 15312 | # coding=utf8
"""
Label propagation in the context of this module refers to a set of
semi-supervised classification algorithms. At a high level, these algorithms
work by forming a fully-connected graph between all points given and solving
for the steady-state distribution of labels at each point.
These algorithms perform very well in practice. The cost of running can be very
expensive, at approximately O(N^3) where N is the number of (labeled and
unlabeled) points. The theory (why they perform so well) is motivated by
intuitions from random walk algorithms and geometric relationships in the data.
For more information see the references below.
Model Features
--------------
Label clamping:
The algorithm tries to learn distributions of labels over the dataset. In the
"Hard Clamp" mode, the true ground labels are never allowed to change. They
are clamped into position. In the "Soft Clamp" mode, they are allowed some
wiggle room, but some alpha of their original value will always be retained.
Hard clamp is the same as soft clamping with alpha set to 1.
Kernel:
A function which projects a vector into some higher dimensional space. This
  implementation supports RBF and KNN kernels. Using the RBF kernel generates
a dense matrix of size O(N^2). KNN kernel will generate a sparse matrix of
size O(k*N) which will run much faster. See the documentation for SVMs for
more info on kernels.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
Notes
-----
References:
[1] Yoshua Bengio, Olivier Delalleau, Nicolas Le Roux. In Semi-Supervised
Learning (2006), pp. 193-216
[2] Olivier Delalleau, Yoshua Bengio, Nicolas Le Roux. Efficient
Non-Parametric Function Induction in Semi-Supervised Learning. AISTAT 2005
"""
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
from abc import ABCMeta, abstractmethod
from scipy import sparse
import numpy as np
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import rbf_kernel
from ..utils.graph import graph_laplacian
from ..utils.extmath import safe_sparse_dot
from ..utils.validation import check_X_y, check_is_fitted
from ..externals import six
from ..neighbors.unsupervised import NearestNeighbors
### Helper functions
def _not_converged(y_truth, y_prediction, tol=1e-3):
"""basic convergence check"""
return np.abs(y_truth - y_prediction).sum() > tol
class BaseLabelPropagation(six.with_metaclass(ABCMeta, BaseEstimator,
ClassifierMixin)):
"""Base class for label propagation module.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
n_neighbors : integer > 0
Parameter for knn kernel
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7,
alpha=1, max_iter=30, tol=1e-3):
self.max_iter = max_iter
self.tol = tol
# kernel parameters
self.kernel = kernel
self.gamma = gamma
self.n_neighbors = n_neighbors
# clamping factor
self.alpha = alpha
def _get_kernel(self, X, y=None):
if self.kernel == "rbf":
if y is None:
return rbf_kernel(X, X, gamma=self.gamma)
else:
return rbf_kernel(X, y, gamma=self.gamma)
elif self.kernel == "knn":
if self.nn_fit is None:
self.nn_fit = NearestNeighbors(self.n_neighbors).fit(X)
if y is None:
return self.nn_fit.kneighbors_graph(self.nn_fit._fit_X,
self.n_neighbors,
mode='connectivity')
else:
return self.nn_fit.kneighbors(y, return_distance=False)
else:
raise ValueError("%s is not a valid kernel. Only rbf and knn"
" are supported at this time" % self.kernel)
@abstractmethod
def _build_graph(self):
raise NotImplementedError("Graph construction must be implemented"
" to fit a label propagation model.")
def predict(self, X):
"""Performs inductive inference across the model.
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
y : array_like, shape = [n_samples]
Predictions for input data
"""
probas = self.predict_proba(X)
return self.classes_[np.argmax(probas, axis=1)].ravel()
def predict_proba(self, X):
"""Predict probability for each possible outcome.
Compute the probability estimates for each single sample in X
and each possible outcome seen during training (categorical
distribution).
Parameters
----------
X : array_like, shape = [n_samples, n_features]
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Normalized probability distributions across
class labels
"""
check_is_fitted(self, 'X_')
if sparse.isspmatrix(X):
X_2d = X
else:
X_2d = np.atleast_2d(X)
weight_matrices = self._get_kernel(self.X_, X_2d)
if self.kernel == 'knn':
probabilities = []
for weight_matrix in weight_matrices:
ine = np.sum(self.label_distributions_[weight_matrix], axis=0)
probabilities.append(ine)
probabilities = np.array(probabilities)
else:
weight_matrices = weight_matrices.T
probabilities = np.dot(weight_matrices, self.label_distributions_)
normalizer = np.atleast_2d(np.sum(probabilities, axis=1)).T
probabilities /= normalizer
return probabilities
def fit(self, X, y):
"""Fit a semi-supervised label propagation model based
All the input data is provided matrix X (labeled and unlabeled)
and corresponding label matrix y with a dedicated marker value for
unlabeled samples.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
A {n_samples by n_samples} size matrix will be created from this
y : array_like, shape = [n_samples]
n_labeled_samples (unlabeled points are marked as -1)
All unlabeled samples will be transductively assigned labels
Returns
-------
self : returns an instance of self.
"""
X, y = check_X_y(X, y)
self.X_ = X
# actual graph construction (implementations should override this)
graph_matrix = self._build_graph()
# label construction
# construct a categorical distribution for classification only
classes = np.unique(y)
classes = (classes[classes != -1])
self.classes_ = classes
n_samples, n_classes = len(y), len(classes)
y = np.asarray(y)
unlabeled = y == -1
clamp_weights = np.ones((n_samples, 1))
clamp_weights[unlabeled, 0] = self.alpha
# initialize distributions
self.label_distributions_ = np.zeros((n_samples, n_classes))
for label in classes:
self.label_distributions_[y == label, classes == label] = 1
y_static = np.copy(self.label_distributions_)
if self.alpha > 0.:
y_static *= 1 - self.alpha
y_static[unlabeled] = 0
l_previous = np.zeros((self.X_.shape[0], n_classes))
remaining_iter = self.max_iter
if sparse.isspmatrix(graph_matrix):
graph_matrix = graph_matrix.tocsr()
while (_not_converged(self.label_distributions_, l_previous, self.tol)
and remaining_iter > 1):
l_previous = self.label_distributions_
self.label_distributions_ = safe_sparse_dot(
graph_matrix, self.label_distributions_)
# clamp
self.label_distributions_ = np.multiply(
clamp_weights, self.label_distributions_) + y_static
remaining_iter -= 1
normalizer = np.sum(self.label_distributions_, axis=1)[:, np.newaxis]
self.label_distributions_ /= normalizer
# set the transduction item
transduction = self.classes_[np.argmax(self.label_distributions_,
axis=1)]
self.transduction_ = transduction.ravel()
self.n_iter_ = self.max_iter - remaining_iter
return self
class LabelPropagation(BaseLabelPropagation):
"""Label Propagation classifier
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
        Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
Parameter for rbf kernel
n_neighbors : integer > 0
Parameter for knn kernel
alpha : float
Clamping factor
max_iter : float
Change maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelPropagation
>>> label_prop_model = LabelPropagation()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelPropagation(...)
References
----------
Xiaojin Zhu and Zoubin Ghahramani. Learning from labeled and unlabeled data
with label propagation. Technical Report CMU-CALD-02-107, Carnegie Mellon
University, 2002 http://pages.cs.wisc.edu/~jerryzhu/pub/CMU-CALD-02-107.pdf
See Also
--------
LabelSpreading : Alternate label propagation strategy more robust to noise
"""
def _build_graph(self):
"""Matrix representing a fully connected graph between each sample
This basic implementation creates a non-stochastic affinity matrix, so
class distributions will exceed 1 (normalization may be desired).
"""
if self.kernel == 'knn':
self.nn_fit = None
affinity_matrix = self._get_kernel(self.X_)
normalizer = affinity_matrix.sum(axis=0)
if sparse.isspmatrix(affinity_matrix):
affinity_matrix.data /= np.diag(np.array(normalizer))
else:
affinity_matrix /= normalizer[:, np.newaxis]
return affinity_matrix
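# A minimal usage sketch of the knn kernel, which builds a sparse graph and
# typically scales better than the dense rbf kernel described above. The helper
# name and the synthetic masking of the iris labels are illustrative only.
def _example_knn_label_propagation():
    from sklearn import datasets
    iris = datasets.load_iris()
    rng = np.random.RandomState(42)
    labels = np.copy(iris.target)
    labels[rng.rand(len(labels)) < 0.8] = -1  # hide roughly 80% of the labels
    model = LabelPropagation(kernel='knn', n_neighbors=7)
    model.fit(iris.data, labels)
    return model.transduction_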
class LabelSpreading(BaseLabelPropagation):
"""LabelSpreading model for semi-supervised learning
    This model is similar to the basic Label Propagation algorithm,
but uses affinity matrix based on the normalized graph Laplacian
and soft clamping across the labels.
Read more in the :ref:`User Guide <label_propagation>`.
Parameters
----------
kernel : {'knn', 'rbf'}
String identifier for kernel function to use.
Only 'rbf' and 'knn' kernels are currently supported.
gamma : float
parameter for rbf kernel
n_neighbors : integer > 0
parameter for knn kernel
alpha : float
clamping factor
max_iter : float
maximum number of iterations allowed
tol : float
Convergence tolerance: threshold to consider the system at steady
state
Attributes
----------
X_ : array, shape = [n_samples, n_features]
Input array.
classes_ : array, shape = [n_classes]
The distinct labels used in classifying instances.
label_distributions_ : array, shape = [n_samples, n_classes]
Categorical distribution for each item.
transduction_ : array, shape = [n_samples]
Label assigned to each item via the transduction.
n_iter_ : int
Number of iterations run.
Examples
--------
>>> from sklearn import datasets
>>> from sklearn.semi_supervised import LabelSpreading
>>> label_prop_model = LabelSpreading()
>>> iris = datasets.load_iris()
>>> random_unlabeled_points = np.where(np.random.random_integers(0, 1,
... size=len(iris.target)))
>>> labels = np.copy(iris.target)
>>> labels[random_unlabeled_points] = -1
>>> label_prop_model.fit(iris.data, labels)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
LabelSpreading(...)
References
----------
Dengyong Zhou, Olivier Bousquet, Thomas Navin Lal, Jason Weston,
Bernhard Schoelkopf. Learning with local and global consistency (2004)
http://citeseer.ist.psu.edu/viewdoc/summary?doi=10.1.1.115.3219
See Also
--------
LabelPropagation : Unregularized graph based semi-supervised learning
"""
def __init__(self, kernel='rbf', gamma=20, n_neighbors=7, alpha=0.2,
max_iter=30, tol=1e-3):
# this one has different base parameters
super(LabelSpreading, self).__init__(kernel=kernel, gamma=gamma,
n_neighbors=n_neighbors,
alpha=alpha, max_iter=max_iter,
tol=tol)
def _build_graph(self):
"""Graph matrix for Label Spreading computes the graph laplacian"""
# compute affinity matrix (or gram matrix)
if self.kernel == 'knn':
self.nn_fit = None
n_samples = self.X_.shape[0]
affinity_matrix = self._get_kernel(self.X_)
laplacian = graph_laplacian(affinity_matrix, normed=True)
laplacian = -laplacian
if sparse.isspmatrix(laplacian):
diag_mask = (laplacian.row == laplacian.col)
laplacian.data[diag_mask] = 0.0
else:
laplacian.flat[::n_samples + 1] = 0.0 # set diag to 0.0
return laplacian
| bsd-3-clause |
thp44/delphin_6_automation | data_process/2d_1d/simon/example_sim.py | 1 | 7655 | __author__ = "Christian Kongsgaard"
__license__ = 'MIT'
# -------------------------------------------------------------------------------------------------------------------- #
# IMPORTS
# Modules
import os
import json
import pandas as pd
import xmltodict
import shutil
# RiBuild Modules
from delphin_6_automation.database_interactions import mongo_setup
from delphin_6_automation.database_interactions.auth import auth_2d_1d as auth_dict
from delphin_6_automation.sampling import inputs
from delphin_6_automation.database_interactions import material_interactions
from delphin_6_automation.delphin_setup import delphin_permutations
from delphin_6_automation.file_parsing import delphin_parser
# -------------------------------------------------------------------------------------------------------------------- #
# RIBuild
server = mongo_setup.global_init(auth_dict)
def create_2d_designs(folder):
bricks = pd.read_excel(os.path.join(folder, 'Brick.xlsx'))
plasters = pd.read_excel(os.path.join(folder, 'Plaster.xlsx'))
ref_folder = os.path.join(folder, 'delphin')
for file in os.listdir(ref_folder):
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(ref_folder, file))
for p_index, p_id in enumerate(plasters['Material ID']):
new_material = material_interactions.get_material_info(p_id)
plaster_delphin = delphin_permutations.change_layer_material(delphin_dict,
'Lime cement mortar [717]',
new_material)
for index, mat_id in enumerate(bricks['Material ID']):
new_material = material_interactions.get_material_info(mat_id)
new_delphin = delphin_permutations.change_layer_material(plaster_delphin,
'Old Building Brick Dresden ZP [504]',
new_material)
file_name = f'{file.split(".")[0]}_{plasters.iloc[p_index, 1]}_{bricks.iloc[index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(folder, 'design', file_name),
'w'), pretty=True)
def create_1d_designs(folder):
bricks = pd.read_excel(os.path.join(folder, 'Brick.xlsx'))
plasters = pd.read_excel(os.path.join(folder, 'Plaster.xlsx'))
ref_folder = os.path.join(folder, 'delphin')
temp_folder = os.path.join(folder, 'temp')
thickness = [0.228, 0.348, 0.468]
for file in os.listdir(ref_folder):
for thick in thickness:
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(ref_folder, file))
thick_delphin = delphin_permutations.change_layer_width(delphin_dict,
'Old Building Brick Dresden ZP [504]',
thick)
thick_delphin = delphin_permutations.update_output_locations(thick_delphin)
for p_index, p_id in enumerate(plasters['Material ID']):
new_material = material_interactions.get_material_info(p_id)
new_delphin = delphin_permutations.change_layer_material(thick_delphin,
'Lime cement mortar [717]',
new_material)
file_name = '_'.join(file.split('_')[:2]) + f'_{int((thick+0.012)*100)}cm_1D_{plasters.iloc[p_index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(temp_folder, file_name),
'w'), pretty=True)
for file in os.listdir(temp_folder):
delphin_dict = delphin_parser.dp6_to_dict(os.path.join(temp_folder, file))
for index, mat_id in enumerate(bricks['Material ID']):
new_material = material_interactions.get_material_info(mat_id)
new_delphin = delphin_permutations.change_layer_material(delphin_dict,
'Old Building Brick Dresden ZP [504]',
new_material)
file_name = f'{file.split(".")[0]}_{bricks.iloc[index, 1]}.d6p'
xmltodict.unparse(new_delphin,
output=open(os.path.join(folder, 'design', file_name),
'w'), pretty=True)
def create_sampling_strategy(path: str, design_option: list) -> dict:
"""
    Create a sampling strategy for WP6 Delphin Automation. The sampling strategy will be named 'sampling_strategy.json'
and be located at the given folder.
"""
design = [design_.split('.')[0] for design_ in design_option]
scenario = {'generic_scenario': None}
distributions = {'exterior_climate':
{'type': 'discrete', 'range': ['Weimar', 'Bremen', 'MuenchenAirp']},
'exterior_heat_transfer_coefficient_slope':
{'type': 'uniform', 'range': [1, 4], },
'exterior_moisture_transfer_coefficient':
{'type': 'discrete', 'range': [7.7*10**-9]},
'solar_absorption':
{'type': 'uniform', 'range': [0.4, 0.8], },
'rain_scale_factor':
{'type': 'uniform', 'range': [0, 2], },
'interior_climate':
{'type': 'discrete', 'range': ['a', 'b'], },
'wall_orientation':
{'type': 'uniform', 'range': [0, 360], },
'start_year':
{'type': 'discrete', 'range': [i for i in range(2020, 2046)], },
}
sampling_settings = {'initial samples per set': 1,
'add samples per run': 1,
'max samples': 500,
'sequence': 10,
'standard error threshold': 0.1,
'raw sample size': 2 ** 9}
combined_dict = {'design': design, 'scenario': scenario,
'distributions': distributions, 'settings': sampling_settings}
with open(os.path.join(path, 'sampling_strategy.json'), 'w') as file:
json.dump(combined_dict, file)
return combined_dict
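# A small illustrative helper (the name and usage are assumptions, not part of
# the original workflow): read back the strategy written above and list which
# parameters will be sampled.
def _example_inspect_strategy(path):
    with open(os.path.join(path, 'sampling_strategy.json')) as file:
        strategy = json.load(file)
    return sorted(strategy['distributions'].keys())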
def copy_designs(folder):
folder_1d = os.path.join(folder, '1D', 'design')
folder_2d = os.path.join(folder, '2D', 'design')
dst_folder = os.path.join(folder, 'designs')
print('Copy 1D')
for file1d in os.listdir(folder_1d):
shutil.copyfile(os.path.join(folder_1d, file1d), os.path.join(dst_folder, file1d))
print('Copy 2D')
for file2d in os.listdir(folder_2d):
shutil.copyfile(os.path.join(folder_2d, file2d), os.path.join(dst_folder, file2d))
folder_ = r'C:\Users\ocni\OneDrive - Danmarks Tekniske Universitet\Shared WP6 DTU-SBiAAU'
folder_1d = os.path.join(folder_, '1D')
folder_2d = os.path.join(folder_, '2D')
folder_strategy = os.path.join(folder_, 'sampling_strategy')
folder_design = os.path.join(folder_, 'designs')
#create_1d_designs(folder_1d)
create_2d_designs(folder_2d)
copy_designs(folder_)
design_options = os.listdir(folder_design)
create_sampling_strategy(folder_strategy, design_options)
mongo_setup.global_end_ssh(server)
| mit |
macks22/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
jkarnows/scikit-learn | benchmarks/bench_glm.py | 297 | 1493 | """
A comparison of different methods in GLM
Data comes from a random square matrix.
"""
from datetime import datetime
import numpy as np
from sklearn import linear_model
from sklearn.utils.bench import total_seconds
if __name__ == '__main__':
import pylab as pl
n_iter = 40
time_ridge = np.empty(n_iter)
time_ols = np.empty(n_iter)
time_lasso = np.empty(n_iter)
dimensions = 500 * np.arange(1, n_iter + 1)
for i in range(n_iter):
print('Iteration %s of %s' % (i, n_iter))
n_samples, n_features = 10 * i + 3, 10 * i + 3
X = np.random.randn(n_samples, n_features)
Y = np.random.randn(n_samples)
start = datetime.now()
ridge = linear_model.Ridge(alpha=1.)
ridge.fit(X, Y)
time_ridge[i] = total_seconds(datetime.now() - start)
start = datetime.now()
ols = linear_model.LinearRegression()
ols.fit(X, Y)
time_ols[i] = total_seconds(datetime.now() - start)
start = datetime.now()
lasso = linear_model.LassoLars()
lasso.fit(X, Y)
time_lasso[i] = total_seconds(datetime.now() - start)
pl.figure('scikit-learn GLM benchmark results')
pl.xlabel('Dimensions')
pl.ylabel('Time (s)')
pl.plot(dimensions, time_ridge, color='r')
pl.plot(dimensions, time_ols, color='g')
pl.plot(dimensions, time_lasso, color='b')
pl.legend(['Ridge', 'OLS', 'LassoLars'], loc='upper left')
pl.axis('tight')
pl.show()
| bsd-3-clause |
russel1237/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without sample weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
LiaoPan/scikit-learn | examples/exercises/plot_iris_exercise.py | 323 | 1602 | """
================================
SVM Exercise
================================
A tutorial exercise for using different SVM kernels.
This exercise is used in the :ref:`using_kernels_tut` part of the
:ref:`supervised_learning_tut` section of the :ref:`stat_learn_tut_index`.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, svm
iris = datasets.load_iris()
X = iris.data
y = iris.target
X = X[y != 0, :2]
y = y[y != 0]
n_sample = len(X)
np.random.seed(0)
order = np.random.permutation(n_sample)
X = X[order]
y = y[order].astype(float)
n_train = int(.9 * n_sample)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
# fit the model
for fig_num, kernel in enumerate(('linear', 'rbf', 'poly')):
clf = svm.SVC(kernel=kernel, gamma=10)
clf.fit(X_train, y_train)
plt.figure(fig_num)
plt.clf()
plt.scatter(X[:, 0], X[:, 1], c=y, zorder=10, cmap=plt.cm.Paired)
# Circle out the test data
plt.scatter(X_test[:, 0], X_test[:, 1], s=80, facecolors='none', zorder=10)
plt.axis('tight')
x_min = X[:, 0].min()
x_max = X[:, 0].max()
y_min = X[:, 1].min()
y_max = X[:, 1].max()
XX, YY = np.mgrid[x_min:x_max:200j, y_min:y_max:200j]
Z = clf.decision_function(np.c_[XX.ravel(), YY.ravel()])
# Put the result into a color plot
Z = Z.reshape(XX.shape)
plt.pcolormesh(XX, YY, Z > 0, cmap=plt.cm.Paired)
plt.contour(XX, YY, Z, colors=['k', 'k', 'k'], linestyles=['--', '-', '--'],
levels=[-.5, 0, .5])
plt.title(kernel)
plt.show()
| bsd-3-clause |
michigraber/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
xzh86/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
jschuecker/nest-simulator | pynest/examples/plot_weight_matrices.py | 17 | 6243 | # -*- coding: utf-8 -*-
#
# plot_weight_matrices.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
'''
Plot weight matrices example
----------------------------
This example demonstrates how to extract the connection strength
for all the synapses among two populations of neurons and gather
these values in weight matrices for further analysis and visualization.
All connection types between these populations are considered, i.e.,
four weight matrices are created and plotted.
'''
'''
First, we import all necessary modules to extract, handle and plot
the connectivity matrices
'''
import numpy as np
import pylab
import nest
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
'''
We now specify a function which takes as arguments lists of neuron gids
corresponding to each population
'''
def plot_weight_matrices(E_neurons, I_neurons):
'''
Function to extract and plot weight matrices for all connections
among E_neurons and I_neurons
'''
'''
First, we initialize all the matrices, whose dimensionality is
determined by the number of elements in each population
Since in this example, we have 2 populations (E/I), 2^2 possible
synaptic connections exist (EE, EI, IE, II)
'''
W_EE = np.zeros([len(E_neurons), len(E_neurons)])
W_EI = np.zeros([len(I_neurons), len(E_neurons)])
W_IE = np.zeros([len(E_neurons), len(I_neurons)])
W_II = np.zeros([len(I_neurons), len(I_neurons)])
'''
Using `GetConnections`, we extract the information about all the
connections involving the populations of interest. `GetConnections`
returns a list of arrays (connection objects), one per connection.
Each array has the following elements:
[source-gid target-gid target-thread synapse-model-id port]
'''
a_EE = nest.GetConnections(E_neurons, E_neurons)
'''
Using `GetStatus`, we can extract the value of the connection weight,
for all the connections between these populations
'''
c_EE = nest.GetStatus(a_EE, keys='weight')
'''
Repeat the two previous steps for all other connection types
'''
a_EI = nest.GetConnections(I_neurons, E_neurons)
c_EI = nest.GetStatus(a_EI, keys='weight')
a_IE = nest.GetConnections(E_neurons, I_neurons)
c_IE = nest.GetStatus(a_IE, keys='weight')
a_II = nest.GetConnections(I_neurons, I_neurons)
c_II = nest.GetStatus(a_II, keys='weight')
'''
We now iterate through the list of all connections of each type.
To populate the corresponding weight matrix, we begin by identifying
the source-gid (first element of each connection object, n[0])
and the target-gid (second element of each connection object, n[1]).
For each gid, we subtract the minimum gid within the corresponding
population, to assure the matrix indices range from 0 to the size of
the population.
After determining the matrix indices [i, j], for each connection
object, the corresponding weight is added to the entry W[i,j].
The procedure is then repeated for all the different connection types.
'''
for idx, n in enumerate(a_EE):
W_EE[n[0] - min(E_neurons), n[1] - min(E_neurons)] += c_EE[idx]
for idx, n in enumerate(a_EI):
W_EI[n[0] - min(I_neurons), n[1] - min(E_neurons)] += c_EI[idx]
for idx, n in enumerate(a_IE):
W_IE[n[0] - min(E_neurons), n[1] - min(I_neurons)] += c_IE[idx]
for idx, n in enumerate(a_II):
W_II[n[0] - min(I_neurons), n[1] - min(I_neurons)] += c_II[idx]
'''
We can now specify the figure and axes properties. For this specific
example, we wish to display all the weight matrices in a single
figure, which requires us to use ``GridSpec`` (for example)
to specify the spatial arrangement of the axes.
A subplot is subsequently created for each connection type.
'''
fig = pylab.figure()
fig.suptitle('Weight matrices', fontsize=14)
gs = gridspec.GridSpec(4, 4)
ax1 = pylab.subplot(gs[:-1, :-1])
ax2 = pylab.subplot(gs[:-1, -1])
ax3 = pylab.subplot(gs[-1, :-1])
ax4 = pylab.subplot(gs[-1, -1])
'''
Using ``imshow``, we can visualize the weight matrix in the corresponding
axis. We can also specify the colormap for this image.
'''
plt1 = ax1.imshow(W_EE, cmap='jet')
'''
Using the ``axis_divider`` module from ``mpl_toolkits``, we can
allocate a small extra space on the right of the current axis,
which we reserve for a colorbar.
'''
divider = make_axes_locatable(ax1)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt1, cax=cax)
'''
We now set the title of each axis and adjust the axis subplot parameters
'''
ax1.set_title('W_{EE}')
pylab.tight_layout()
'''
Finally, the last three steps are repeated for each synapse type
'''
plt2 = ax2.imshow(W_IE)
plt2.set_cmap('jet')
divider = make_axes_locatable(ax2)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt2, cax=cax)
ax2.set_title('W_{EI}')
pylab.tight_layout()
plt3 = ax3.imshow(W_EI)
plt3.set_cmap('jet')
divider = make_axes_locatable(ax3)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt3, cax=cax)
ax3.set_title('W_{IE}')
pylab.tight_layout()
plt4 = ax4.imshow(W_II)
plt4.set_cmap('jet')
divider = make_axes_locatable(ax4)
cax = divider.append_axes("right", "5%", pad="3%")
pylab.colorbar(plt4, cax=cax)
ax4.set_title('W_{II}')
pylab.tight_layout()
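'''
A hedged usage sketch, not taken from the original example: create two small
populations, connect them all-to-all with fixed weights, and call the plotting
function above. The 'iaf_psc_alpha' model name and the Create/Connect calls
assume a NEST 2.x style interface and may need adjusting for other versions.
'''
def _example_usage():
    nest.ResetKernel()
    E_neurons = nest.Create('iaf_psc_alpha', 10)
    I_neurons = nest.Create('iaf_psc_alpha', 5)
    nest.Connect(E_neurons, E_neurons, 'all_to_all', {'weight': 1.0})
    nest.Connect(E_neurons, I_neurons, 'all_to_all', {'weight': 2.0})
    nest.Connect(I_neurons, E_neurons, 'all_to_all', {'weight': -4.0})
    nest.Connect(I_neurons, I_neurons, 'all_to_all', {'weight': -1.0})
    plot_weight_matrices(E_neurons, I_neurons)
    pylab.show()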
| gpl-2.0 |
lanselin/pysal | pysal/esda/tests/test_join_counts.py | 6 | 2224 | import unittest
import numpy as np
from ..join_counts import Join_Counts
from ...weights import lat2W
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Join_Counts_Tester(unittest.TestCase):
"""Unit test for Join Counts"""
def setUp(self):
self.w = lat2W(4, 4)
self.y = np.ones(16)
self.y[0:8] = 0
def test_Join_Counts(self):
"""Test method"""
np.random.seed(12345)
jc = Join_Counts(self.y, self.w)
self.assertAlmostEquals(jc.bb, 10.0)
self.assertAlmostEquals(jc.bw, 4.0)
self.assertAlmostEquals(jc.ww, 10.0)
self.assertAlmostEquals(jc.J, 24.0)
self.assertAlmostEquals(len(jc.sim_bb), 999)
self.assertAlmostEquals(jc.p_sim_bb, 0.0030000000000000001)
self.assertAlmostEquals(np.mean(jc.sim_bb), 5.5465465465465469)
self.assertAlmostEquals(np.max(jc.sim_bb), 10.0)
self.assertAlmostEquals(np.min(jc.sim_bb), 0.0)
self.assertAlmostEquals(len(jc.sim_bw), 999)
self.assertAlmostEquals(jc.p_sim_bw, 1.0)
self.assertAlmostEquals(np.mean(jc.sim_bw), 12.811811811811811)
self.assertAlmostEquals(np.max(jc.sim_bw), 24.0)
self.assertAlmostEquals(np.min(jc.sim_bw), 7.0)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
np.random.seed(12345)
r1 = Join_Counts.by_col(df, ['y'], w=self.w, permutations=999)
bb = np.unique(r1.y_bb.values)
bw = np.unique(r1.y_bw.values)
bb_p = np.unique(r1.y_p_sim_bb.values)
bw_p = np.unique(r1.y_p_sim_bw.values)
np.random.seed(12345)
c = Join_Counts(self.y, self.w, permutations=999)
self.assertAlmostEquals(bb, c.bb)
self.assertAlmostEquals(bw, c.bw)
self.assertAlmostEquals(bb_p, c.p_sim_bb)
self.assertAlmostEquals(bw_p, c.p_sim_bw)
suite = unittest.TestSuite()
test_classes = [Join_Counts_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
ah391/sc-python | datalook.py | 1 | 1722 |
# coding: utf-8
import sys
import numpy
import matplotlib.pyplot
def analyse(filename, outfile=None):
"""Load data and create plots.
Subplots with placeholders, with set lables, layout tight
"""
data = numpy.loadtxt(fname=filename, delimiter=',')
# Create a wide figure to hold the subplots
fig = matplotlib.pyplot.figure(figsize=(10.3, 3.0))
# create placeholders for plots
subplot1 = fig.add_subplot (1,3,1)
subplot2 = fig.add_subplot (1,3,2)
subplot3 = fig.add_subplot (1,3,3)
subplot1.set_ylabel('average')
subplot1.plot(numpy.mean(data, axis=0))
subplot2.set_ylabel('maximum')
subplot2.plot(numpy.max(data, axis=0))
subplot3.set_ylabel('minimum')
subplot3.plot(numpy.min(data, axis=0))
fig.tight_layout()
if outfile is None:
matplotlib.pyplot.show()
else:
matplotlib.pyplot.savefig(outfile)
def detect_problems(filename):
"""Some of our temperature files have problems, check for these
This function reads a file (filename argument) and reports on odd looking maxima and minima that add up to 0.
This seems to happen when the sensors break.
The function does not return any data.
"""
data = numpy.loadtxt(fname=filename, delimiter=',')
if numpy.max(data, axis=0)[0] ==0 and numpy.max(data, axis=0)[20]==20:
print("Suspicious looking maxima")
elif numpy.sum(numpy.min(data, axis=0))==0:
print("Minima add up to zero")
else:
print("Data looks OK")
if __name__ == "__main__":
print("Running",sys.argv[0])
print(sys.argv[1])
analyse(sys.argv[1], outfile=sys.argv[2])
detect_problems(sys.argv[1])
| mit |
trungnt13/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 47 | 8095 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
yilei0620/3D_Conditional_Gan | lib/data_utils.py | 1 | 1596 | import numpy as np
from sklearn import utils as skutils
from rng import np_rng, py_rng
def center_crop(x, ph, pw=None):
if pw is None:
pw = ph
h, w = x.shape[:2]
j = int(round((h - ph)/2.))
i = int(round((w - pw)/2.))
return x[j:j+ph, i:i+pw]
def patch(x, ph, pw=None):
if pw is None:
pw = ph
h, w = x.shape[:2]
j = py_rng.randint(0, h-ph)
i = py_rng.randint(0, w-pw)
x = x[j:j+ph, i:i+pw]
return x
def list_shuffle(*data):
idxs = np_rng.permutation(np.arange(len(data[0])))
if len(data) == 1:
return [data[0][idx] for idx in idxs]
else:
return [[d[idx] for idx in idxs] for d in data]
def shuffle(*arrays, **options):
if isinstance(arrays[0][0], basestring):
return list_shuffle(*arrays)
else:
return skutils.shuffle(*arrays, random_state=np_rng)
def OneHot(X, n=None, negative_class=0.):
X = np.asarray(X).flatten()
if n is None:
n = np.max(X) + 1
Xoh = np.ones((len(X), n)) * negative_class
Xoh[np.arange(len(X)), X] = 1.
return Xoh
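# --- illustration (not part of the original module) --------------------------
# A minimal, hypothetical sketch of how OneHot behaves; `_demo_one_hot` exists
# only for documentation and is never called by this module.
def _demo_one_hot():
    # OneHot([0, 2, 1], n=3) puts a 1. at each class index and fills the rest
    # with `negative_class` (0. by default):
    #   [[1., 0., 0.],
    #    [0., 0., 1.],
    #    [0., 1., 0.]]
    return OneHot([0, 2, 1], n=3)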
def iter_data(*data, **kwargs):
size = kwargs.get('size', 128)
n = kwargs.get('ndata',0)
sIndex = kwargs.get('shuffle_index',[])
batches = n / size
if n % size != 0:
batches += 1
for b in range(batches):
start = b * size
end = (b + 1) * size
if end > n:
end = n
if len(data) == 1:
yield data[0][start:end]
else:
# print sIndex[start:end]
yield tuple([d[sIndex[start:end]] for d in data])
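# --- illustration (not part of the original module) --------------------------
# A hypothetical sketch of the iter_data contract: the caller supplies the
# total sample count as `ndata` and a permutation as `shuffle_index`; note the
# module targets Python 2, so `n / size` above is integer division.
def _demo_iter_data():
    X = np.arange(10)
    y = np.arange(10) * 2
    idx = np.arange(10)
    # yields 3 (X_batch, y_batch) tuples of sizes 4, 4 and 2
    return list(iter_data(X, y, size=4, ndata=10, shuffle_index=idx))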
| mit |
martatolos/DemandAnalysis | visualizations.py | 1 | 5518 | # Plots
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
import re
import random
def plot_several_countries(df, ylabel, title, country_list="", save=False, num="", xticks_hourly=False, kind='bar', linestyle='-', color='mbygcr', marker='o', linewidth=4.0, fontsize=16, legend=True):
"""
    This function plots a dataframe with several countries
    @param df: data frame
    @param ylabel: label for y axis
    @param title: graphic title
    @param country_list: country names, used to build the file name when saving
    @param save: if True, save the figure to a file instead of showing it
    @param num: numeric suffix for the saved file name (random if empty)
    @param xticks_hourly: if True, use hourly (0-23) ticks on the x axis
    @param kind: graphic type, e.g. bar or line
    @param linestyle: line style
    @param color: color to use
    @param marker: shape of point on a line
    @param linewidth: line width
    @param fontsize: font size
    @param legend: if False, remove the legend from the plot
    @return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
if xticks_hourly:
xticks_hourly = range(0,24)
else:
xticks_hourly = None
### PLOT FINAL
if kind == 'line':
graphic = df.plot(title=title, kind=kind, fontsize=fontsize, linestyle=linestyle, color=color,
linewidth=linewidth, marker=marker, xticks=xticks_hourly, figsize=(18,9))
else:
graphic = df.plot(title=title, kind=kind, fontsize=fontsize, color=color,
xticks=xticks_hourly, figsize=(18,9))
if legend == False:
graphic.legend_.remove()
graphic.set_ylabel(ylabel)
graphic.legend(prop={'size': 12})
if save==True and country_list!="":
namefile= re.sub("[\'\",\[\]]", "", str(country_list))
namefile= re.sub("[\s+]", "-", namefile)
if num=="":
num = random.randrange(1,100)
plt.savefig(namefile+str(num))
else:
plt.show()
def plot_yearly_consumption(df, country, kind='bar', linestyle='-', color='blue', marker='o', linewidth=4.0,fontsize=16):
"""
This function plots the yearly data from a monthlypowerconsumptions data frame
@param df: monthlypowerconsumptions data frame
    @param country: country name to add to the title of the plot
@return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
### PLOT FINAL
if kind == 'line':
graphic = df.plot(x='year', y='Sum', title='Evolution of electricity consumption in '+ country, kind=kind, fontsize=fontsize, linestyle=linestyle, color=color , marker=marker)
else:
graphic = df.plot(x='year', y='Sum', title='Evolution of electricity consumption in '+ country, kind=kind, fontsize=fontsize, color=color)
graphic.set_ylabel('GWh')
plt.show()
def plot_monthly_average_consumption(mpc, country_list, ylabel='normalized', title='', kind='bar', linestyle='-', color='mbygcr', marker='o', linewidth=4.0, fontsize=16, legend=True):
"""
    This function plots the average monthly consumption per country from a monthlypowerconsumptions object
    @param mpc: monthlypowerconsumptions object
    @param country_list: country names selecting which countries to plot
    @param ylabel: label for y axis
    @param title: graphic title
    @param kind: graphic type, e.g. bar or line
    @param linestyle: line style
    @param color: color to use
    @param marker: shape of point on a line
    @param linewidth: line width
    @param fontsize: font size
    @param legend: True or False to turn the legend on or off
    @return: n/a
"""
# Plotting
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
df = mpc.data_normalization(year=False)
df = df.groupby('country').mean()
del df['year']
del df['Sum']
df = df.T
plot_several_countries(df[country_list], ylabel, title, kind=kind, linestyle=linestyle, color=color, marker=marker, linewidth=linewidth, fontsize=fontsize, legend=legend)
def plot_average_week(df, ylabel='Normalized', title="Normalized average weekday consumption",kind='bar', color='rbbbbgg', rotation=50, legend=True):
# Plotting
"""
@param df: Data frame with the values to plot
@param ylabel: Label for the y axis
@param title: Title for the graphic
@param kind: Type of graphic: bar, line,...
@param color: color values
    @param rotation: degrees of rotation for the x-axis tick labels
    @param legend: True or False to turn the legend on or off
"""
font = {'family' : 'normal',
'weight' : 'bold',
'size' : 12}
matplotlib.rc('font', **font)
#create a dictionary for the week days
    dayDict={0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday', 4:'Friday', 5:'Saturday', 6:'Sunday'}
df = df[['Country', 'weekday', 'daily']]
df = df.pivot(index='weekday', columns='Country')
df = df.rename(index=dayDict)
df.columns = df.columns.droplevel()
# normalized
df = df/df.mean()
graphic = df.plot(title=title, kind=kind, color=color, legend=legend)
graphic.set_ylabel(ylabel)
graphic.legend(prop={'size': 12})
plt.xticks(rotation=rotation)
plt.show()
# #### PLOT FINAL
# # Plot the infaltion with the spanish consumption
# ES_info_year = ES_info[['year','Sum','inflation']]
# ES_info_year.set_index('year')
# plt.figure()
# ax = ES_info_year.plot(x='year', title='Consumption and Inflation in Spain', y='Sum', kind='bar',fontsize=16)
# ax.set_ylabel('Consumption - GWh')
# ax2 = ax.twinx()
# ax2.plot(ES_info_year['inflation'].values, linestyle='-', color='red', marker='o', linewidth=4.0)
# ax2.set_ylabel('Inflation - Annual Change [%]')
# plt.show() | gpl-2.0 |
mayblue9/scikit-learn | benchmarks/bench_multilabel_metrics.py | 276 | 7138 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': partial(f1_score, average='micro'),
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
density : array-like of ints (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
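# Illustrative usage (an assumption, not part of the original benchmark): the
# helper below is never called; it only shows the shape contract of benchmark().
def _example_benchmark_call():
    # one metric x two formats x two sample sizes -> result shape (1, 2, 2, 1, 1)
    return benchmark(metrics=[METRICS['accuracy']],
                     formats=[FORMATS['dense'], FORMATS['csr']],
                     samples=np.array([100, 1000]))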
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
Zing22/uemscode | tmp_test.py | 1 | 1634 | # -*- coding=utf-8 -*-
#### for testing steps
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
from sklearn.externals import joblib
from PIL import Image
from process import toBin, cropLetters
from img2feature import toFeature
from main import readAllFiles
TEMP_DIR = 'tmp/'
def test_onePic():
path = input('Pic path:')
img = Image.open(path)
bimg = toBin(img)
bimg.save(TEMP_DIR+ 'bimg.jpg')
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
def test_tmp_dir():
filenames = readAllFiles(TEMP_DIR)
for file in filenames:
img = Image.open(TEMP_DIR + file)
bimg = toBin(img)
bimg.save(TEMP_DIR + 'tmp_' + file)
success, letters = cropLetters(bimg)
if not success:
print('Crop failed.')
print(letters)
return
features = []
for l in letters:
features.append([int(x) for x in toFeature(l).split(' ')])
# l.save(TEMP_DIR + '%d.jpg' % len(features))
pre = clf.predict(features)
code = ''.join([chr(x + ord('A')) for x in pre])
print(code)
SAVE_TO = 'model.pkl'
def main():
global clf
clf = joblib.load(SAVE_TO)
test_onePic()
# test_tmp_dir()
if __name__ == '__main__':
main() | mit |
RPGOne/Skynet | scikit-learn-0.18.1/sklearn/linear_model/tests/test_ransac.py | 52 | 17482 | from scipy import sparse
import numpy as np
from numpy.testing import assert_equal, assert_raises
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from sklearn.utils import check_random_state
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_almost_equal
from sklearn.linear_model import LinearRegression, RANSACRegressor, Lasso
from sklearn.linear_model.ransac import _dynamic_max_trials
# Generate coordinates of line
X = np.arange(-200, 200)
y = 0.2 * X + 20
data = np.column_stack([X, y])
# Add some faulty data
outliers = np.array((10, 30, 200))
data[outliers[0], :] = (1000, 1000)
data[outliers[1], :] = (-1000, -1000)
data[outliers[2], :] = (-100, -50)
X = data[:, 0][:, np.newaxis]
y = data[:, 1]
def test_ransac_inliers_outliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_is_data_valid():
def is_data_valid(X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
X = np.random.rand(10, 2)
y = np.random.rand(10, 1)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_data_valid=is_data_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_is_model_valid():
def is_model_valid(estimator, X, y):
assert_equal(X.shape[0], 2)
assert_equal(y.shape[0], 2)
return False
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5,
is_model_valid=is_model_valid,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_max_trials():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=0,
random_state=0)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, max_trials=11,
random_state=0)
assert getattr(ransac_estimator, 'n_trials_', None) is None
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 2)
def test_ransac_stop_n_inliers():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_n_inliers=2,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_stop_score():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, stop_score=0,
random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.n_trials_, 1)
def test_ransac_score():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.score(X[2:], y[2:]), 1)
assert_less(ransac_estimator.score(X[:2], y[:2]), 1)
def test_ransac_predict():
X = np.arange(100)[:, None]
y = np.zeros((100, ))
y[0] = 1
y[1] = 100
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.5, random_state=0)
ransac_estimator.fit(X, y)
assert_equal(ransac_estimator.predict(X), np.zeros(100))
def test_ransac_resid_thresh_no_inliers():
# When residual_threshold=0.0 there are no inliers and a
# ValueError with a message should be raised
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=0.0, random_state=0)
assert_raises_regexp(ValueError,
"No inliers.*residual_threshold.*0\.0",
ransac_estimator.fit, X, y)
def test_ransac_sparse_coo():
X_sparse = sparse.coo_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csr():
X_sparse = sparse.csr_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_sparse_csc():
X_sparse = sparse.csc_matrix(X)
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator.fit(X_sparse, y)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_none_estimator():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_none_estimator = RANSACRegressor(None, 2, 5, random_state=0)
ransac_estimator.fit(X, y)
ransac_none_estimator.fit(X, y)
assert_array_almost_equal(ransac_estimator.predict(X),
ransac_none_estimator.predict(X))
def test_ransac_min_n_samples():
base_estimator = LinearRegression()
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator2 = RANSACRegressor(base_estimator,
min_samples=2. / X.shape[0],
residual_threshold=5, random_state=0)
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=-1,
residual_threshold=5, random_state=0)
ransac_estimator4 = RANSACRegressor(base_estimator, min_samples=5.2,
residual_threshold=5, random_state=0)
ransac_estimator5 = RANSACRegressor(base_estimator, min_samples=2.0,
residual_threshold=5, random_state=0)
ransac_estimator6 = RANSACRegressor(base_estimator,
residual_threshold=5, random_state=0)
ransac_estimator7 = RANSACRegressor(base_estimator,
min_samples=X.shape[0] + 1,
residual_threshold=5, random_state=0)
ransac_estimator1.fit(X, y)
ransac_estimator2.fit(X, y)
ransac_estimator5.fit(X, y)
ransac_estimator6.fit(X, y)
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator2.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator5.predict(X))
assert_array_almost_equal(ransac_estimator1.predict(X),
ransac_estimator6.predict(X))
assert_raises(ValueError, ransac_estimator3.fit, X, y)
assert_raises(ValueError, ransac_estimator4.fit, X, y)
assert_raises(ValueError, ransac_estimator7.fit, X, y)
def test_ransac_multi_dimensional_targets():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
# 3-D target values
yyy = np.column_stack([y, y, y])
# Estimate parameters of corrupted data
ransac_estimator.fit(X, yyy)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# XXX: Remove in 0.20
def test_ransac_residual_metric():
residual_metric1 = lambda dy: np.sum(np.abs(dy), axis=1)
residual_metric2 = lambda dy: np.sum(dy ** 2, axis=1)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
residual_metric=residual_metric2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
assert_warns(DeprecationWarning, ransac_estimator1.fit, X, yyy)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
assert_warns(DeprecationWarning, ransac_estimator2.fit, X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_residual_loss():
loss_multi1 = lambda y_true, y_pred: np.sum(np.abs(y_true - y_pred), axis=1)
loss_multi2 = lambda y_true, y_pred: np.sum((y_true - y_pred) ** 2, axis=1)
loss_mono = lambda y_true, y_pred : np.abs(y_true - y_pred)
yyy = np.column_stack([y, y, y])
base_estimator = LinearRegression()
ransac_estimator0 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0)
ransac_estimator1 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi1)
ransac_estimator2 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss=loss_multi2)
# multi-dimensional
ransac_estimator0.fit(X, yyy)
ransac_estimator1.fit(X, yyy)
ransac_estimator2.fit(X, yyy)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator1.predict(X))
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
# one-dimensional
ransac_estimator0.fit(X, y)
ransac_estimator2.loss = loss_mono
ransac_estimator2.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
ransac_estimator3 = RANSACRegressor(base_estimator, min_samples=2,
residual_threshold=5, random_state=0,
loss="squared_loss")
ransac_estimator3.fit(X, y)
assert_array_almost_equal(ransac_estimator0.predict(X),
ransac_estimator2.predict(X))
def test_ransac_default_residual_threshold():
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
random_state=0)
# Estimate parameters of corrupted data
ransac_estimator.fit(X, y)
# Ground truth / reference inlier mask
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
assert_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
def test_ransac_dynamic_max_trials():
# Numbers hand-calculated and confirmed on page 119 (Table 4.3) in
# Hartley, R.~I. and Zisserman, A., 2004,
# Multiple View Geometry in Computer Vision, Second Edition,
# Cambridge University Press, ISBN: 0521540518
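    # The reference quantity is ceil(log(1 - p) / log(1 - w ** n)), where w is
    # the inlier ratio (1 - e), n = min_samples and p = stop_probability (0.99
    # below); e.g. for e = 30%, n = 2: log(0.01) / log(1 - 0.49) ~= 6.9 -> 7.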
# e = 0%, min_samples = X
assert_equal(_dynamic_max_trials(100, 100, 2, 0.99), 1)
# e = 5%, min_samples = 2
assert_equal(_dynamic_max_trials(95, 100, 2, 0.99), 2)
# e = 10%, min_samples = 2
assert_equal(_dynamic_max_trials(90, 100, 2, 0.99), 3)
# e = 30%, min_samples = 2
assert_equal(_dynamic_max_trials(70, 100, 2, 0.99), 7)
# e = 50%, min_samples = 2
assert_equal(_dynamic_max_trials(50, 100, 2, 0.99), 17)
# e = 5%, min_samples = 8
assert_equal(_dynamic_max_trials(95, 100, 8, 0.99), 5)
# e = 10%, min_samples = 8
assert_equal(_dynamic_max_trials(90, 100, 8, 0.99), 9)
# e = 30%, min_samples = 8
assert_equal(_dynamic_max_trials(70, 100, 8, 0.99), 78)
# e = 50%, min_samples = 8
assert_equal(_dynamic_max_trials(50, 100, 8, 0.99), 1177)
# e = 0%, min_samples = 10
assert_equal(_dynamic_max_trials(1, 100, 10, 0), 0)
assert_equal(_dynamic_max_trials(1, 100, 10, 1), float('inf'))
base_estimator = LinearRegression()
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=-0.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
ransac_estimator = RANSACRegressor(base_estimator, min_samples=2,
stop_probability=1.1)
assert_raises(ValueError, ransac_estimator.fit, X, y)
def test_ransac_fit_sample_weight():
ransac_estimator = RANSACRegressor(random_state=0)
n_samples = y.shape[0]
weights = np.ones(n_samples)
ransac_estimator.fit(X, y, weights)
# sanity check
assert_equal(ransac_estimator.inlier_mask_.shape[0], n_samples)
ref_inlier_mask = np.ones_like(ransac_estimator.inlier_mask_
).astype(np.bool_)
ref_inlier_mask[outliers] = False
# check that mask is correct
assert_array_equal(ransac_estimator.inlier_mask_, ref_inlier_mask)
# check that fit(X) = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
# X = X1 repeated n1 times, X2 repeated n2 times and so forth
random_state = check_random_state(0)
X_ = random_state.randint(0, 200, [10, 1])
y_ = np.ndarray.flatten(0.2 * X_ + 2)
sample_weight = random_state.randint(0, 10, 10)
outlier_X = random_state.randint(0, 1000, [1, 1])
outlier_weight = random_state.randint(0, 10, 1)
outlier_y = random_state.randint(-1000, 0, 1)
X_flat = np.append(np.repeat(X_, sample_weight, axis=0),
np.repeat(outlier_X, outlier_weight, axis=0), axis=0)
y_flat = np.ndarray.flatten(np.append(np.repeat(y_, sample_weight, axis=0),
np.repeat(outlier_y, outlier_weight, axis=0),
axis=0))
ransac_estimator.fit(X_flat, y_flat)
ref_coef_ = ransac_estimator.estimator_.coef_
sample_weight = np.append(sample_weight, outlier_weight)
X_ = np.append(X_, outlier_X, axis=0)
y_ = np.append(y_, outlier_y)
ransac_estimator.fit(X_, y_, sample_weight)
assert_almost_equal(ransac_estimator.estimator_.coef_, ref_coef_)
# check that if base_estimator.fit doesn't support
# sample_weight, raises error
base_estimator = Lasso()
ransac_estimator = RANSACRegressor(base_estimator)
assert_raises(ValueError, ransac_estimator.fit, X, y, weights)
| bsd-3-clause |
adamhajari/spyre | tests/test_app.py | 1 | 5046 | # from spyre import server
from spyre import server
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from numpy import pi
class TestApp(server.App):
colors = [
{"label": "Green", "value": 'g'},
{"label": "Red", "value": 'r'},
{"label": "Blue", "value": 'b'},
{"label": "Yellow", "value": 'y'},
]
on_demand_streaming_services = [
{"label": "Spotify", "value": 's'},
{"label": "Apple Music", "value": 'a'},
]
title = "Simple Sine Wave"
inputs = [
{
"input_type": 'text',
"label": 'Title',
"value": 'Simple Sine Wave',
"variable_name": 'title',
"action_id": "plot",
}, {
"input_type": 'radiobuttons',
"label": 'Function',
"options": [
{"label": "Sine", "value": "sin", "checked": True},
{"label": "Cosine", "value": "cos"}
],
"variable_name": 'func_type',
"action_id": "plot",
}, {
"input_type": 'checkboxgroup',
"label": 'Axis Labels',
"options": [
{"label": "x-axis", "value": 1, "checked": True},
{"label": "y-axis", "value": 2}
],
"variable_name": 'axis_label',
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'Line Color',
"options": colors,
"variable_name": 'color',
"value": "b",
"action_id": "plot",
}, {
"input_type": 'dropdown',
"label": 'On-Demand Streaming Service',
"options": on_demand_streaming_services,
"variable_name": 'on_demand_streaming_service',
"action_id": "plot",
}, {
"input_type": 'slider',
"label": 'frequency',
"variable_name": 'freq',
"value": 2,
"min": 1,
"max": 30,
"action_id": "plot",
}
]
controls = [
{
"control_type": "button",
"control_id": "button1",
"label": "plot",
}, {
"control_type": "button",
"control_id": "button2",
"label": "download",
}
]
outputs = [
{
"output_type": "html",
"output_id": "html1",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "plot",
"output_id": "plot2",
"control_id": "button1",
"on_page_load": True,
}, {
"output_type": "table",
"output_id": "table_id",
"control_id": "button1",
"sortable": True,
"on_page_load": True,
}, {
"output_type": "download",
"output_id": "download_id",
"control_id": "button2",
}
]
def plot1(self, params):
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
f = float(params['freq'])
title = params['title']
axis_label = map(int, params['axis_label'])
color = params['color']
func_type = params['func_type']
x = np.arange(0, 6 * pi, pi / 50)
splt.set_title(title)
for axis in axis_label:
if axis == 1:
splt.set_xlabel('x axis')
if axis == 2:
splt.set_ylabel('y axis')
if func_type == 'cos':
y = np.cos(f * x)
else:
y = np.sin(f * x)
splt.plot(x, y, color=color) # sine wave
return fig
def plot2(self, params):
data = self.getData(params)
fig = plt.figure() # make figure object
splt = fig.add_subplot(1, 1, 1)
ind = np.arange(len(data['name']))
width = 0.85
splt.bar(ind, data['count'], width)
splt.set_xticks(ind + width / 2)
splt.set_xticklabels(["A", "B", "C"])
return fig
def html1(self, params):
return "hello world"
def html2(self, params):
func_type = params['func_type']
axis_label = params['axis_label']
color = params['color']
freq = params['freq']
html = (
"function type: {} <br>axis label: {}<br>color: {}<br>frequency: {}"
.format(func_type, axis_label, color, freq)
)
return html
def getJsonData(self, params):
count = [1, 4, 3]
name = ['<a href="http://adamhajari.com">A</a>', 'B', 'C']
return {'name': name, 'count': count}
def getData(self, params):
data = self.getJsonData(params)
df = pd.DataFrame(data)
return df
def noOutput(self, input_params):
return 0
# app = TestApp()
# app.launch()
| mit |
jjx02230808/project0223 | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/metrics/tests/test_regression.py | 272 | 6066 | from __future__ import division, print_function
import numpy as np
from itertools import product
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.metrics import explained_variance_score
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import r2_score
from sklearn.metrics.regression import _check_reg_targets
def test_regression_metrics(n_samples=50):
y_true = np.arange(n_samples)
y_pred = y_true + 1
assert_almost_equal(mean_squared_error(y_true, y_pred), 1.)
assert_almost_equal(mean_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(median_absolute_error(y_true, y_pred), 1.)
assert_almost_equal(r2_score(y_true, y_pred), 0.995, 2)
assert_almost_equal(explained_variance_score(y_true, y_pred), 1.)
def test_multioutput_regression():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0, 0, 1], [1, 0, 1, 1], [0, 0, 0, 1]])
error = mean_squared_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
error = mean_absolute_error(y_true, y_pred)
assert_almost_equal(error, (1. / 3 + 2. / 3 + 2. / 3) / 4.)
error = r2_score(y_true, y_pred, multioutput='variance_weighted')
assert_almost_equal(error, 1. - 5. / 2)
error = r2_score(y_true, y_pred, multioutput='uniform_average')
assert_almost_equal(error, -.875)
def test_regression_metrics_at_limits():
assert_almost_equal(mean_squared_error([0.], [0.]), 0.00, 2)
assert_almost_equal(mean_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(median_absolute_error([0.], [0.]), 0.00, 2)
assert_almost_equal(explained_variance_score([0.], [0.]), 1.00, 2)
assert_almost_equal(r2_score([0., 1], [0., 1]), 1.00, 2)
def test__check_reg_targets():
# All of length 3
EXAMPLES = [
("continuous", [1, 2, 3], 1),
("continuous", [[1], [2], [3]], 1),
("continuous-multioutput", [[1, 1], [2, 2], [3, 1]], 2),
("continuous-multioutput", [[5, 1], [4, 2], [3, 1]], 2),
("continuous-multioutput", [[1, 3, 4], [2, 2, 2], [3, 1, 1]], 3),
]
for (type1, y1, n_out1), (type2, y2, n_out2) in product(EXAMPLES,
repeat=2):
if type1 == type2 and n_out1 == n_out2:
y_type, y_check1, y_check2, multioutput = _check_reg_targets(
y1, y2, None)
assert_equal(type1, y_type)
if type1 == 'continuous':
assert_array_equal(y_check1, np.reshape(y1, (-1, 1)))
assert_array_equal(y_check2, np.reshape(y2, (-1, 1)))
else:
assert_array_equal(y_check1, y1)
assert_array_equal(y_check2, y2)
else:
assert_raises(ValueError, _check_reg_targets, y1, y2, None)
def test_regression_multioutput_array():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [0.125, 0.5625], decimal=2)
assert_array_almost_equal(mae, [0.25, 0.625], decimal=2)
assert_array_almost_equal(r, [0.95, 0.93], decimal=2)
assert_array_almost_equal(evs, [0.95, 0.93], decimal=2)
# mean_absolute_error and mean_squared_error are equal because
# it is a binary problem.
y_true = [[0, 0]]*4
y_pred = [[1, 1]]*4
mse = mean_squared_error(y_true, y_pred, multioutput='raw_values')
mae = mean_absolute_error(y_true, y_pred, multioutput='raw_values')
r = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(mse, [1., 1.], decimal=2)
assert_array_almost_equal(mae, [1., 1.], decimal=2)
assert_array_almost_equal(r, [0., 0.], decimal=2)
r = r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]], multioutput='raw_values')
assert_array_almost_equal(r, [0, -3.5], decimal=2)
assert_equal(np.mean(r), r2_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='uniform_average'))
evs = explained_variance_score([[0, -1], [0, 1]], [[2, 2], [1, 1]],
multioutput='raw_values')
assert_array_almost_equal(evs, [0, -1.25], decimal=2)
# Checking for the condition in which both numerator and denominator is
# zero.
y_true = [[1, 3], [-1, 2]]
y_pred = [[1, 4], [-1, 1]]
r2 = r2_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(r2, [1., -3.], decimal=2)
assert_equal(np.mean(r2), r2_score(y_true, y_pred,
multioutput='uniform_average'))
evs = explained_variance_score(y_true, y_pred, multioutput='raw_values')
assert_array_almost_equal(evs, [1., -3.], decimal=2)
assert_equal(np.mean(evs), explained_variance_score(y_true, y_pred))
def test_regression_custom_weights():
y_true = [[1, 2], [2.5, -1], [4.5, 3], [5, 7]]
y_pred = [[1, 1], [2, -1], [5, 4], [5, 6.5]]
msew = mean_squared_error(y_true, y_pred, multioutput=[0.4, 0.6])
maew = mean_absolute_error(y_true, y_pred, multioutput=[0.4, 0.6])
rw = r2_score(y_true, y_pred, multioutput=[0.4, 0.6])
evsw = explained_variance_score(y_true, y_pred, multioutput=[0.4, 0.6])
assert_almost_equal(msew, 0.39, decimal=2)
assert_almost_equal(maew, 0.475, decimal=3)
assert_almost_equal(rw, 0.94, decimal=2)
assert_almost_equal(evsw, 0.94, decimal=2)
| bsd-3-clause |
kenshay/ImageScripter | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/io/stata.py | 7 | 82769 | """
Module contains tools for processing Stata files into DataFrames
The StataReader below was originally written by Joe Presbrey as part of PyDTA.
It has been extended and improved by Skipper Seabold from the Statsmodels
project who also developed the StataWriter and was finally added to pandas in
a once again improved version.
You can find more information on http://presbrey.mit.edu/PyDTA and
http://www.statsmodels.org/devel/
"""
import numpy as np
import sys
import struct
from dateutil.relativedelta import relativedelta
from pandas.types.common import (is_categorical_dtype, is_datetime64_dtype,
_ensure_object)
from pandas.core.base import StringMixin
from pandas.core.categorical import Categorical
from pandas.core.frame import DataFrame
from pandas.core.series import Series
import datetime
from pandas import compat, to_timedelta, to_datetime, isnull, DatetimeIndex
from pandas.compat import lrange, lmap, lzip, text_type, string_types, range, \
zip, BytesIO
from pandas.util.decorators import Appender
import pandas as pd
from pandas.io.common import get_filepath_or_buffer, BaseIterator
from pandas.lib import max_len_string_array, infer_dtype
from pandas.tslib import NaT, Timestamp
_version_error = ("Version of given Stata file is not 104, 105, 108, "
"111 (Stata 7SE), 113 (Stata 8/9), 114 (Stata 10/11), "
"115 (Stata 12), 117 (Stata 13), or 118 (Stata 14)")
_statafile_processing_params1 = """\
convert_dates : boolean, defaults to True
Convert date variables to DataFrame time values
convert_categoricals : boolean, defaults to True
Read value labels and convert columns to Categorical/Factor variables"""
_encoding_params = """\
encoding : string, None or encoding
Encoding used to parse the files. None defaults to iso-8859-1."""
_statafile_processing_params2 = """\
index : identifier of index column
identifier of column that should be used as index of the DataFrame
convert_missing : boolean, defaults to False
Flag indicating whether to convert missing values to their Stata
representations. If False, missing values are replaced with nans.
If True, columns containing missing values are returned with
object data types and missing values are represented by
StataMissingValue objects.
preserve_dtypes : boolean, defaults to True
Preserve Stata datatypes. If False, numeric data are upcast to pandas
default types for foreign data (float64 or int64)
columns : list or None
Columns to retain. Columns will be returned in the given order. None
returns all columns
order_categoricals : boolean, defaults to True
Flag indicating whether converted categorical data are ordered."""
_chunksize_params = """\
chunksize : int, default None
Return StataReader object for iterations, returns chunks with
given number of lines"""
_iterator_params = """\
iterator : boolean, default False
Return StataReader object"""
_read_stata_doc = """Read Stata file into DataFrame
Parameters
----------
filepath_or_buffer : string or file-like object
    Path to .dta file or object implementing a binary read() function
%s
%s
%s
%s
%s
Returns
-------
DataFrame or StataReader
Examples
--------
Read a Stata dta file:
>>> df = pandas.read_stata('filename.dta')
Read a Stata dta file in 10,000 line chunks:
>>> itr = pandas.read_stata('filename.dta', chunksize=10000)
>>> for chunk in itr:
>>> do_something(chunk)
""" % (_statafile_processing_params1, _encoding_params,
_statafile_processing_params2, _chunksize_params,
_iterator_params)
_data_method_doc = """Reads observations from Stata file, converting them into a dataframe
This is a legacy method. Use `read` in new code.
Parameters
----------
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_read_method_doc = """\
Reads observations from Stata file, converting them into a dataframe
Parameters
----------
nrows : int
Number of lines to read from data file, if None read whole file.
%s
%s
Returns
-------
DataFrame
""" % (_statafile_processing_params1, _statafile_processing_params2)
_stata_reader_doc = """\
Class for reading Stata dta files.
Parameters
----------
path_or_buf : string or file-like object
    Path to .dta file or object implementing a binary read() function
%s
%s
%s
%s
""" % (_statafile_processing_params1, _statafile_processing_params2,
_encoding_params, _chunksize_params)
@Appender(_read_stata_doc)
def read_stata(filepath_or_buffer, convert_dates=True,
convert_categoricals=True, encoding=None, index=None,
convert_missing=False, preserve_dtypes=True, columns=None,
order_categoricals=True, chunksize=None, iterator=False):
reader = StataReader(filepath_or_buffer,
convert_dates=convert_dates,
convert_categoricals=convert_categoricals,
index=index, convert_missing=convert_missing,
preserve_dtypes=preserve_dtypes,
columns=columns,
order_categoricals=order_categoricals,
chunksize=chunksize, encoding=encoding)
if iterator or chunksize:
data = reader
else:
data = reader.read()
reader.close()
return data
_date_formats = ["%tc", "%tC", "%td", "%d", "%tw", "%tm", "%tq", "%th", "%ty"]
stata_epoch = datetime.datetime(1960, 1, 1)
def _stata_elapsed_date_to_datetime_vec(dates, fmt):
"""
Convert from SIF to datetime. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
The Stata Internal Format date to convert to datetime according to fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
    Returns
-------
converted : Series
The converted dates
Examples
--------
>>> import pandas as pd
>>> dates = pd.Series([52])
>>> _stata_elapsed_date_to_datetime_vec(dates , "%tw")
0 1961-01-01
dtype: datetime64[ns]
Notes
-----
datetime/c - tc
milliseconds since 01jan1960 00:00:00.000, assuming 86,400 s/day
datetime/C - tC - NOT IMPLEMENTED
milliseconds since 01jan1960 00:00:00.000, adjusted for leap seconds
date - td
days since 01jan1960 (01jan1960 = 0)
weekly date - tw
weeks since 1960w1
This assumes 52 weeks in a year, then adds 7 * remainder of the weeks.
The datetime value is the start of the week in terms of days in the
year, not ISO calendar weeks.
monthly date - tm
months since 1960m1
quarterly date - tq
quarters since 1960q1
half-yearly date - th
        half-years since 1960h1
    yearly date - ty
years since 0000
If you don't have pandas with datetime support, then you can't do
milliseconds accurately.
"""
MIN_YEAR, MAX_YEAR = Timestamp.min.year, Timestamp.max.year
MAX_DAY_DELTA = (Timestamp.max - datetime.datetime(1960, 1, 1)).days
MIN_DAY_DELTA = (Timestamp.min - datetime.datetime(1960, 1, 1)).days
MIN_MS_DELTA = MIN_DAY_DELTA * 24 * 3600 * 1000
MAX_MS_DELTA = MAX_DAY_DELTA * 24 * 3600 * 1000
def convert_year_month_safe(year, month):
"""
Convert year and month to datetimes, using pandas vectorized versions
        when the date range falls within the range supported by pandas.
        Otherwise it falls back to a slower but more robust method using datetime.
"""
if year.max() < MAX_YEAR and year.min() > MIN_YEAR:
return to_datetime(100 * year + month, format='%Y%m')
else:
index = getattr(year, 'index', None)
return Series(
[datetime.datetime(y, m, 1) for y, m in zip(year, month)],
index=index)
def convert_year_days_safe(year, days):
"""
Converts year (e.g. 1999) and days since the start of the year to a
datetime or datetime64 Series
"""
if year.max() < (MAX_YEAR - 1) and year.min() > MIN_YEAR:
return (to_datetime(year, format='%Y') +
to_timedelta(days, unit='d'))
else:
index = getattr(year, 'index', None)
value = [datetime.datetime(y, 1, 1) + relativedelta(days=int(d))
for y, d in zip(year, days)]
return Series(value, index=index)
def convert_delta_safe(base, deltas, unit):
"""
Convert base dates and deltas to datetimes, using pandas vectorized
versions if the deltas satisfy restrictions required to be expressed
as dates in pandas.
"""
index = getattr(deltas, 'index', None)
if unit == 'd':
if deltas.max() > MAX_DAY_DELTA or deltas.min() < MIN_DAY_DELTA:
values = [base + relativedelta(days=int(d)) for d in deltas]
return Series(values, index=index)
elif unit == 'ms':
if deltas.max() > MAX_MS_DELTA or deltas.min() < MIN_MS_DELTA:
values = [base + relativedelta(microseconds=(int(d) * 1000))
for d in deltas]
return Series(values, index=index)
else:
raise ValueError('format not understood')
base = to_datetime(base)
deltas = to_timedelta(deltas, unit=unit)
return base + deltas
# TODO: If/when pandas supports more than datetime64[ns], this should be
# improved to use correct range, e.g. datetime[Y] for yearly
bad_locs = np.isnan(dates)
has_bad_values = False
if bad_locs.any():
has_bad_values = True
data_col = Series(dates)
data_col[bad_locs] = 1.0 # Replace with NaT
dates = dates.astype(np.int64)
if fmt in ["%tc", "tc"]: # Delta ms relative to base
base = stata_epoch
ms = dates
conv_dates = convert_delta_safe(base, ms, 'ms')
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Encountered %tC format. Leaving in Stata Internal Format.")
conv_dates = Series(dates, dtype=np.object)
if has_bad_values:
conv_dates[bad_locs] = pd.NaT
return conv_dates
elif fmt in ["%td", "td", "%d", "d"]: # Delta days relative to base
base = stata_epoch
days = dates
conv_dates = convert_delta_safe(base, days, 'd')
elif fmt in ["%tw", "tw"]: # does not count leap days - 7 days is a week
year = stata_epoch.year + dates // 52
days = (dates % 52) * 7
conv_dates = convert_year_days_safe(year, days)
elif fmt in ["%tm", "tm"]: # Delta months relative to base
year = stata_epoch.year + dates // 12
month = (dates % 12) + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%tq", "tq"]: # Delta quarters relative to base
year = stata_epoch.year + dates // 4
month = (dates % 4) * 3 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%th", "th"]: # Delta half-years relative to base
year = stata_epoch.year + dates // 2
month = (dates % 2) * 6 + 1
conv_dates = convert_year_month_safe(year, month)
elif fmt in ["%ty", "ty"]: # Years -- not delta
year = dates
month = np.ones_like(dates)
conv_dates = convert_year_month_safe(year, month)
else:
raise ValueError("Date fmt %s not understood" % fmt)
if has_bad_values: # Restore NaT for bad values
conv_dates[bad_locs] = NaT
return conv_dates
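# Worked example (illustration only, mirroring the docstring above): under the
# %tm rule the stored value counts months since 1960m1, so 120 maps to
# year = 1960 + 120 // 12 = 1970 and month = 120 % 12 + 1 = 1, i.e.
#   _stata_elapsed_date_to_datetime_vec(pd.Series([120.0]), "%tm") -> 1970-01-01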
def _datetime_to_stata_elapsed_vec(dates, fmt):
"""
Convert from datetime to SIF. http://www.stata.com/help.cgi?datetime
Parameters
----------
dates : Series
Series or array containing datetime.datetime or datetime64[ns] to
convert to the Stata Internal Format given by fmt
fmt : str
The format to convert to. Can be, tc, td, tw, tm, tq, th, ty
"""
index = dates.index
NS_PER_DAY = 24 * 3600 * 1000 * 1000 * 1000
US_PER_DAY = NS_PER_DAY / 1000
def parse_dates_safe(dates, delta=False, year=False, days=False):
d = {}
if is_datetime64_dtype(dates.values):
if delta:
delta = dates - stata_epoch
d['delta'] = delta.values.astype(
np.int64) // 1000 # microseconds
if days or year:
dates = DatetimeIndex(dates)
d['year'], d['month'] = dates.year, dates.month
if days:
days = (dates.astype(np.int64) -
to_datetime(d['year'], format='%Y').astype(np.int64))
d['days'] = days // NS_PER_DAY
elif infer_dtype(dates) == 'datetime':
if delta:
delta = dates.values - stata_epoch
f = lambda x: \
US_PER_DAY * x.days + 1000000 * x.seconds + x.microseconds
v = np.vectorize(f)
d['delta'] = v(delta)
if year:
year_month = dates.apply(lambda x: 100 * x.year + x.month)
d['year'] = year_month.values // 100
d['month'] = (year_month.values - d['year'] * 100)
if days:
f = lambda x: (x - datetime.datetime(x.year, 1, 1)).days
v = np.vectorize(f)
d['days'] = v(dates)
else:
raise ValueError('Columns containing dates must contain either '
'datetime64, datetime.datetime or null values.')
return DataFrame(d, index=index)
bad_loc = isnull(dates)
index = dates.index
if bad_loc.any():
dates = Series(dates)
if is_datetime64_dtype(dates):
dates[bad_loc] = to_datetime(stata_epoch)
else:
dates[bad_loc] = stata_epoch
if fmt in ["%tc", "tc"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta / 1000
elif fmt in ["%tC", "tC"]:
from warnings import warn
warn("Stata Internal Format tC not supported.")
conv_dates = dates
elif fmt in ["%td", "td"]:
d = parse_dates_safe(dates, delta=True)
conv_dates = d.delta // US_PER_DAY
elif fmt in ["%tw", "tw"]:
d = parse_dates_safe(dates, year=True, days=True)
conv_dates = (52 * (d.year - stata_epoch.year) + d.days // 7)
elif fmt in ["%tm", "tm"]:
d = parse_dates_safe(dates, year=True)
conv_dates = (12 * (d.year - stata_epoch.year) + d.month - 1)
elif fmt in ["%tq", "tq"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 4 * (d.year - stata_epoch.year) + (d.month - 1) // 3
elif fmt in ["%th", "th"]:
d = parse_dates_safe(dates, year=True)
conv_dates = 2 * (d.year - stata_epoch.year) + \
(d.month > 6).astype(np.int)
elif fmt in ["%ty", "ty"]:
d = parse_dates_safe(dates, year=True)
conv_dates = d.year
else:
raise ValueError("Format %s is not a known Stata date format" % fmt)
conv_dates = Series(conv_dates, dtype=np.float64)
missing_value = struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0]
conv_dates[bad_loc] = missing_value
return Series(conv_dates, index=index)
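# Worked example (illustration only): the inverse conversion for %tm computes
# 12 * (year - 1960) + month - 1, so a datetime of 1970-01-01 maps back to 120.0:
#   _datetime_to_stata_elapsed_vec(pd.Series([datetime.datetime(1970, 1, 1)]), "%tm")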
excessive_string_length_error = """
Fixed width strings in Stata .dta files are limited to 244 (or fewer)
characters. Column '%s' does not satisfy this restriction.
"""
class PossiblePrecisionLoss(Warning):
pass
precision_loss_doc = """
Column converted from %s to %s, and some data are outside of the lossless
conversion range. This may result in a loss of precision in the saved data.
"""
class ValueLabelTypeMismatch(Warning):
pass
value_label_mismatch_doc = """
Stata value labels (pandas categories) must be strings. Column {0} contains
non-string labels which will be converted to strings. Please check that the
Stata data file created has not lost information due to duplicate labels.
"""
class InvalidColumnName(Warning):
pass
invalid_name_doc = """
Not all pandas column names were valid Stata variable names.
The following replacements have been made:
{0}
If this is not what you expect, please make sure you have Stata-compliant
column names in your DataFrame (strings only, max 32 characters, only
alphanumerics and underscores, no Stata reserved words)
"""
def _cast_to_stata_types(data):
"""Checks the dtypes of the columns of a pandas DataFrame for
compatibility with the data types and ranges supported by Stata, and
converts if necessary.
Parameters
----------
data : DataFrame
The DataFrame to check and convert
Notes
-----
Numeric columns in Stata must be one of int8, int16, int32, float32 or
float64, with some additional value restrictions. int8 and int16 columns
are checked for violations of the value restrictions and upcast if needed.
int64 data is not usable in Stata, and so it is downcast to int32 whenever
    the values are in the int32 range, and sidecast to float64 when larger than
this range. If the int64 values are outside of the range of those
perfectly representable as float64 values, a warning is raised.
    bool columns are cast to int8. uint columns are converted to int of the
    same size if there is no loss in precision, otherwise they are upcast to a
    larger type. uint64 is currently not supported since it is converted to
object in a DataFrame.
"""
ws = ''
# original, if small, if large
conversion_data = ((np.bool, np.int8, np.int8),
(np.uint8, np.int8, np.int16),
(np.uint16, np.int16, np.int32),
(np.uint32, np.int32, np.int64))
float32_max = struct.unpack('<f', b'\xff\xff\xff\x7e')[0]
float64_max = struct.unpack('<d', b'\xff\xff\xff\xff\xff\xff\xdf\x7f')[0]
for col in data:
dtype = data[col].dtype
# Cast from unsupported types to supported types
for c_data in conversion_data:
if dtype == c_data[0]:
if data[col].max() <= np.iinfo(c_data[1]).max:
dtype = c_data[1]
else:
dtype = c_data[2]
if c_data[2] == np.float64: # Warn if necessary
if data[col].max() >= 2 ** 53:
ws = precision_loss_doc % ('uint64', 'float64')
data[col] = data[col].astype(dtype)
# Check values and upcast if necessary
if dtype == np.int8:
if data[col].max() > 100 or data[col].min() < -127:
data[col] = data[col].astype(np.int16)
elif dtype == np.int16:
if data[col].max() > 32740 or data[col].min() < -32767:
data[col] = data[col].astype(np.int32)
elif dtype == np.int64:
if (data[col].max() <= 2147483620 and
data[col].min() >= -2147483647):
data[col] = data[col].astype(np.int32)
else:
data[col] = data[col].astype(np.float64)
if data[col].max() >= 2 ** 53 or data[col].min() <= -2 ** 53:
ws = precision_loss_doc % ('int64', 'float64')
elif dtype in (np.float32, np.float64):
value = data[col].max()
if np.isinf(value):
msg = 'Column {0} has a maximum value of infinity which is ' \
'outside the range supported by Stata.'
raise ValueError(msg.format(col))
if dtype == np.float32 and value > float32_max:
data[col] = data[col].astype(np.float64)
elif dtype == np.float64:
if value > float64_max:
msg = 'Column {0} has a maximum value ({1}) outside the ' \
'range supported by Stata ({1})'
raise ValueError(msg.format(col, value, float64_max))
if ws:
import warnings
warnings.warn(ws, PossiblePrecisionLoss)
return data
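# Added usage sketch (hedged; not part of the original source): bool columns
# are cast to int8, and int64 columns whose values exceed the int32 range fall
# back to float64, as documented above:
#
#   >>> df = DataFrame({'flag': np.array([True, False]),
#   ...                 'big': np.array([1, 2 ** 40], dtype=np.int64)})
#   >>> _cast_to_stata_types(df).dtypes  # doctest: +SKIP
#   big     float64
#   flag       int8
#   dtype: object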
class StataValueLabel(object):
"""
Parse a categorical column and prepare formatted output
Parameters
-----------
    catarray : Categorical
        Categorical Series whose categories become the value labels
    Attributes
    ----------
    labname : str
        Name of the value-label table (taken from the column name)
    value_labels : list of (int, str)
        Sorted (code, label) pairs written to the label table
Methods
-------
generate_value_label
"""
def __init__(self, catarray):
self.labname = catarray.name
categories = catarray.cat.categories
self.value_labels = list(zip(np.arange(len(categories)), categories))
self.value_labels.sort(key=lambda x: x[0])
self.text_len = np.int32(0)
self.off = []
self.val = []
self.txt = []
self.n = 0
# Compute lengths and setup lists of offsets and labels
for vl in self.value_labels:
category = vl[1]
if not isinstance(category, string_types):
category = str(category)
import warnings
warnings.warn(value_label_mismatch_doc.format(catarray.name),
ValueLabelTypeMismatch)
self.off.append(self.text_len)
self.text_len += len(category) + 1 # +1 for the padding
self.val.append(vl[0])
self.txt.append(category)
self.n += 1
if self.text_len > 32000:
raise ValueError('Stata value labels for a single variable must '
'have a combined length less than 32,000 '
'characters.')
# Ensure int32
self.off = np.array(self.off, dtype=np.int32)
self.val = np.array(self.val, dtype=np.int32)
# Total length
self.len = 4 + 4 + 4 * self.n + 4 * self.n + self.text_len
def _encode(self, s):
"""
        Python 3 compatibility shim
"""
if compat.PY3:
return s.encode(self._encoding)
else:
return s
def generate_value_label(self, byteorder, encoding):
"""
Parameters
----------
byteorder : str
Byte order of the output
encoding : str
File encoding
Returns
-------
value_label : bytes
Bytes containing the formatted value label
"""
self._encoding = encoding
bio = BytesIO()
null_string = '\x00'
null_byte = b'\x00'
# len
bio.write(struct.pack(byteorder + 'i', self.len))
# labname
labname = self._encode(_pad_bytes(self.labname[:32], 33))
bio.write(labname)
# padding - 3 bytes
for i in range(3):
bio.write(struct.pack('c', null_byte))
# value_label_table
# n - int32
bio.write(struct.pack(byteorder + 'i', self.n))
# textlen - int32
bio.write(struct.pack(byteorder + 'i', self.text_len))
# off - int32 array (n elements)
for offset in self.off:
bio.write(struct.pack(byteorder + 'i', offset))
# val - int32 array (n elements)
for value in self.val:
bio.write(struct.pack(byteorder + 'i', value))
# txt - Text labels, null terminated
for text in self.txt:
bio.write(self._encode(text + null_string))
bio.seek(0)
return bio.read()
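    # Added illustration (hedged; not part of the original class): for a small
    # categorical column the parsed (code, label) pairs and the serialized
    # label table look like this:
    #
    #   >>> cat = Series(['a', 'b', 'a'], name='grp').astype('category')
    #   >>> vl = StataValueLabel(cat)
    #   >>> vl.value_labels
    #   [(0, 'a'), (1, 'b')]
    #   >>> isinstance(vl.generate_value_label('<', 'latin-1'), bytes)
    #   True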
class StataMissingValue(StringMixin):
"""
An observation's missing value.
Parameters
-----------
value : int8, int16, int32, float32 or float64
The Stata missing value code
Attributes
----------
string : string
String representation of the Stata missing value
value : int8, int16, int32, float32 or float64
The original encoded missing value
Notes
-----
More information: <http://www.stata.com/help.cgi?missing>
    Integer missing values map the codes '.', '.a', ..., '.z' to the ranges
101 ... 127 (for int8), 32741 ... 32767 (for int16) and 2147483621 ...
2147483647 (for int32). Missing values for floating point data types are
more complex but the pattern is simple to discern from the following table.
np.float32 missing values (float in Stata)
0000007f .
0008007f .a
0010007f .b
...
00c0007f .x
00c8007f .y
00d0007f .z
np.float64 missing values (double in Stata)
000000000000e07f .
000000000001e07f .a
000000000002e07f .b
...
000000000018e07f .x
000000000019e07f .y
00000000001ae07f .z
"""
# Construct a dictionary of missing values
MISSING_VALUES = {}
bases = (101, 32741, 2147483621)
for b in bases:
# Conversion to long to avoid hash issues on 32 bit platforms #8968
MISSING_VALUES[compat.long(b)] = '.'
for i in range(1, 27):
MISSING_VALUES[compat.long(i + b)] = '.' + chr(96 + i)
float32_base = b'\x00\x00\x00\x7f'
increment = struct.unpack('<i', b'\x00\x08\x00\x00')[0]
for i in range(27):
value = struct.unpack('<f', float32_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('<i', struct.pack('<f', value))[
0] + increment
float32_base = struct.pack('<i', int_value)
float64_base = b'\x00\x00\x00\x00\x00\x00\xe0\x7f'
increment = struct.unpack('q', b'\x00\x00\x00\x00\x00\x01\x00\x00')[0]
for i in range(27):
value = struct.unpack('<d', float64_base)[0]
MISSING_VALUES[value] = '.'
if i > 0:
MISSING_VALUES[value] += chr(96 + i)
int_value = struct.unpack('q', struct.pack('<d', value))[0] + increment
float64_base = struct.pack('q', int_value)
BASE_MISSING_VALUES = {'int8': 101,
'int16': 32741,
'int32': 2147483621,
'float32': struct.unpack('<f', float32_base)[0],
'float64': struct.unpack('<d', float64_base)[0]}
def __init__(self, value):
self._value = value
# Conversion to long to avoid hash issues on 32 bit platforms #8968
value = compat.long(value) if value < 2147483648 else float(value)
self._str = self.MISSING_VALUES[value]
string = property(lambda self: self._str,
doc="The Stata representation of the missing value: "
"'.', '.a'..'.z'")
value = property(lambda self: self._value,
doc='The binary representation of the missing value.')
def __unicode__(self):
return self.string
def __repr__(self):
# not perfect :-/
return "%s(%s)" % (self.__class__, self)
def __eq__(self, other):
return (isinstance(other, self.__class__) and
self.string == other.string and self.value == other.value)
@classmethod
def get_base_missing_value(cls, dtype):
if dtype == np.int8:
value = cls.BASE_MISSING_VALUES['int8']
elif dtype == np.int16:
value = cls.BASE_MISSING_VALUES['int16']
elif dtype == np.int32:
value = cls.BASE_MISSING_VALUES['int32']
elif dtype == np.float32:
value = cls.BASE_MISSING_VALUES['float32']
elif dtype == np.float64:
value = cls.BASE_MISSING_VALUES['float64']
else:
raise ValueError('Unsupported dtype')
return value
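    # Added illustration (hedged): integer missing-value codes map onto
    # Stata's '.', '.a', ..., '.z' representations, and get_base_missing_value
    # returns the generic '.' code for a dtype:
    #
    #   >>> StataMissingValue(101).string
    #   '.'
    #   >>> StataMissingValue(102).string
    #   '.a'
    #   >>> StataMissingValue.get_base_missing_value(np.int8)
    #   101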
class StataParser(object):
_default_encoding = 'iso-8859-1'
def __init__(self, encoding):
self._encoding = encoding
# type code.
# --------------------
# str1 1 = 0x01
# str2 2 = 0x02
# ...
# str244 244 = 0xf4
# byte 251 = 0xfb (sic)
# int 252 = 0xfc
# long 253 = 0xfd
# float 254 = 0xfe
# double 255 = 0xff
# --------------------
# NOTE: the byte type seems to be reserved for categorical variables
# with a label, but the underlying variable is -127 to 100
# we're going to drop the label and cast to int
self.DTYPE_MAP = \
dict(
lzip(range(1, 245), ['a' + str(i) for i in range(1, 245)]) +
[
(251, np.int8),
(252, np.int16),
(253, np.int32),
(254, np.float32),
(255, np.float64)
]
)
self.DTYPE_MAP_XML = \
dict(
[
(32768, np.uint8), # Keys to GSO
(65526, np.float64),
(65527, np.float32),
(65528, np.int32),
(65529, np.int16),
(65530, np.int8)
]
)
self.TYPE_MAP = lrange(251) + list('bhlfd')
self.TYPE_MAP_XML = \
dict(
[
# Not really a Q, unclear how to handle byteswap
(32768, 'Q'),
(65526, 'd'),
(65527, 'f'),
(65528, 'l'),
(65529, 'h'),
(65530, 'b')
]
)
# NOTE: technically, some of these are wrong. there are more numbers
# that can be represented. it's the 27 ABOVE and BELOW the max listed
# numeric data type in [U] 12.2.2 of the 11.2 manual
float32_min = b'\xff\xff\xff\xfe'
float32_max = b'\xff\xff\xff\x7e'
float64_min = b'\xff\xff\xff\xff\xff\xff\xef\xff'
float64_max = b'\xff\xff\xff\xff\xff\xff\xdf\x7f'
self.VALID_RANGE = {
'b': (-127, 100),
'h': (-32767, 32740),
'l': (-2147483647, 2147483620),
'f': (np.float32(struct.unpack('<f', float32_min)[0]),
np.float32(struct.unpack('<f', float32_max)[0])),
'd': (np.float64(struct.unpack('<d', float64_min)[0]),
np.float64(struct.unpack('<d', float64_max)[0]))
}
self.OLD_TYPE_MAPPING = {
98: 251, # byte
105: 252, # int
108: 253, # long
102: 254 # float
# don't know old code for double
}
# These missing values are the generic '.' in Stata, and are used
# to replace nans
self.MISSING_VALUES = {
'b': 101,
'h': 32741,
'l': 2147483621,
'f': np.float32(struct.unpack('<f', b'\x00\x00\x00\x7f')[0]),
'd': np.float64(
struct.unpack('<d', b'\x00\x00\x00\x00\x00\x00\xe0\x7f')[0])
}
self.NUMPY_TYPE_MAP = {
'b': 'i1',
'h': 'i2',
'l': 'i4',
'f': 'f4',
'd': 'f8',
'Q': 'u8'
}
# Reserved words cannot be used as variable names
self.RESERVED_WORDS = ('aggregate', 'array', 'boolean', 'break',
'byte', 'case', 'catch', 'class', 'colvector',
'complex', 'const', 'continue', 'default',
'delegate', 'delete', 'do', 'double', 'else',
'eltypedef', 'end', 'enum', 'explicit',
'export', 'external', 'float', 'for', 'friend',
'function', 'global', 'goto', 'if', 'inline',
'int', 'local', 'long', 'NULL', 'pragma',
'protected', 'quad', 'rowvector', 'short',
'typedef', 'typename', 'virtual')
class StataReader(StataParser, BaseIterator):
__doc__ = _stata_reader_doc
def __init__(self, path_or_buf, convert_dates=True,
convert_categoricals=True, index=None,
convert_missing=False, preserve_dtypes=True,
columns=None, order_categoricals=True,
encoding='iso-8859-1', chunksize=None):
super(StataReader, self).__init__(encoding)
self.col_sizes = ()
# Arguments to the reader (can be temporarily overridden in
# calls to read).
self._convert_dates = convert_dates
self._convert_categoricals = convert_categoricals
self._index = index
self._convert_missing = convert_missing
self._preserve_dtypes = preserve_dtypes
self._columns = columns
self._order_categoricals = order_categoricals
self._encoding = encoding
self._chunksize = chunksize
# State variables for the file
self._has_string_data = False
self._missing_values = False
self._can_read_value_labels = False
self._column_selector_set = False
self._value_labels_read = False
self._data_read = False
self._dtype = None
self._lines_read = 0
self._native_byteorder = _set_endianness(sys.byteorder)
if isinstance(path_or_buf, str):
path_or_buf, encoding, _ = get_filepath_or_buffer(
path_or_buf, encoding=self._default_encoding
)
if isinstance(path_or_buf, (str, compat.text_type, bytes)):
self.path_or_buf = open(path_or_buf, 'rb')
else:
# Copy to BytesIO, and ensure no encoding
contents = path_or_buf.read()
try:
contents = contents.encode(self._default_encoding)
except:
pass
self.path_or_buf = BytesIO(contents)
self._read_header()
def __enter__(self):
""" enter context manager """
return self
def __exit__(self, exc_type, exc_value, traceback):
""" exit context manager """
self.close()
def close(self):
""" close the handle if its open """
try:
self.path_or_buf.close()
except IOError:
pass
def _read_header(self):
first_char = self.path_or_buf.read(1)
if struct.unpack('c', first_char)[0] == b'<':
self._read_new_header(first_char)
else:
self._read_old_header(first_char)
self.has_string_data = len([x for x in self.typlist
if type(x) is int]) > 0
# calculate size of a data record
self.col_sizes = lmap(lambda x: self._calcsize(x), self.typlist)
# remove format details from %td
self.fmtlist = ["%td" if x.startswith("%td") else x
for x in self.fmtlist]
def _read_new_header(self, first_char):
# The first part of the header is common to 117 and 118.
self.path_or_buf.read(27) # stata_dta><header><release>
self.format_version = int(self.path_or_buf.read(3))
if self.format_version not in [117, 118]:
raise ValueError(_version_error)
self.path_or_buf.read(21) # </release><byteorder>
self.byteorder = self.path_or_buf.read(3) == "MSF" and '>' or '<'
self.path_or_buf.read(15) # </byteorder><K>
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.path_or_buf.read(7) # </K><N>
self.nobs = self._get_nobs()
self.path_or_buf.read(11) # </N><label>
self.data_label = self._get_data_label()
self.path_or_buf.read(19) # </label><timestamp>
self.time_stamp = self._get_time_stamp()
self.path_or_buf.read(26) # </timestamp></header><map>
self.path_or_buf.read(8) # 0x0000000000000000
self.path_or_buf.read(8) # position of <map>
self._seek_vartypes = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 16
self._seek_varnames = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_sortlist = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 10
self._seek_formats = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 9
self._seek_value_label_names = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 19
# Requires version-specific treatment
self._seek_variable_labels = self._get_seek_variable_labels()
self.path_or_buf.read(8) # <characteristics>
self.data_location = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 6
self.seek_strls = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 7
self.seek_value_labels = struct.unpack(
self.byteorder + 'q', self.path_or_buf.read(8))[0] + 14
self.typlist, self.dtyplist = self._get_dtypes(self._seek_vartypes)
self.path_or_buf.seek(self._seek_varnames)
self.varlist = self._get_varlist()
self.path_or_buf.seek(self._seek_sortlist)
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.path_or_buf.seek(self._seek_formats)
self.fmtlist = self._get_fmtlist()
self.path_or_buf.seek(self._seek_value_label_names)
self.lbllist = self._get_lbllist()
self.path_or_buf.seek(self._seek_variable_labels)
self._variable_labels = self._get_variable_labels()
# Get data type information, works for versions 117-118.
def _get_dtypes(self, seek_vartypes):
self.path_or_buf.seek(seek_vartypes)
raw_typlist = [struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
for i in range(self.nvar)]
def f(typ):
if typ <= 2045:
return typ
try:
return self.TYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata types [{0}]".
format(typ))
typlist = [f(x) for x in raw_typlist]
def f(typ):
if typ <= 2045:
return str(typ)
try:
return self.DTYPE_MAP_XML[typ]
except KeyError:
raise ValueError("cannot convert stata dtype [{0}]"
.format(typ))
dtyplist = [f(x) for x in raw_typlist]
return typlist, dtyplist
def _get_varlist(self):
if self.format_version == 117:
b = 33
elif self.format_version == 118:
b = 129
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the format list
def _get_fmtlist(self):
if self.format_version == 118:
b = 57
elif self.format_version > 113:
b = 49
elif self.format_version > 104:
b = 12
else:
b = 7
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
# Returns the label list
def _get_lbllist(self):
if self.format_version >= 118:
b = 129
elif self.format_version > 108:
b = 33
else:
b = 9
return [self._null_terminate(self.path_or_buf.read(b))
for i in range(self.nvar)]
def _get_variable_labels(self):
if self.format_version == 118:
vlblist = [self._decode(self.path_or_buf.read(321))
for i in range(self.nvar)]
elif self.format_version > 105:
vlblist = [self._null_terminate(self.path_or_buf.read(81))
for i in range(self.nvar)]
else:
vlblist = [self._null_terminate(self.path_or_buf.read(32))
for i in range(self.nvar)]
return vlblist
def _get_nobs(self):
if self.format_version == 118:
return struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
return struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
def _get_data_label(self):
if self.format_version == 118:
strlen = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
return self._decode(self.path_or_buf.read(strlen))
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 105:
return self._null_terminate(self.path_or_buf.read(81))
else:
return self._null_terminate(self.path_or_buf.read(32))
def _get_time_stamp(self):
if self.format_version == 118:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self.path_or_buf.read(strlen).decode("utf-8")
elif self.format_version == 117:
strlen = struct.unpack('b', self.path_or_buf.read(1))[0]
return self._null_terminate(self.path_or_buf.read(strlen))
elif self.format_version > 104:
return self._null_terminate(self.path_or_buf.read(18))
else:
raise ValueError()
def _get_seek_variable_labels(self):
if self.format_version == 117:
            self.path_or_buf.read(8)  # <variable_labels>, throw away
# Stata 117 data files do not follow the described format. This is
# a work around that uses the previous label, 33 bytes for each
# variable, 20 for the closing tag and 17 for the opening tag
return self._seek_value_label_names + (33 * self.nvar) + 20 + 17
elif self.format_version == 118:
return struct.unpack(self.byteorder + 'q',
self.path_or_buf.read(8))[0] + 17
else:
raise ValueError()
def _read_old_header(self, first_char):
self.format_version = struct.unpack('b', first_char)[0]
if self.format_version not in [104, 105, 108, 111, 113, 114, 115]:
raise ValueError(_version_error)
self.byteorder = struct.unpack('b', self.path_or_buf.read(1))[
0] == 0x1 and '>' or '<'
self.filetype = struct.unpack('b', self.path_or_buf.read(1))[0]
self.path_or_buf.read(1) # unused
self.nvar = struct.unpack(self.byteorder + 'H',
self.path_or_buf.read(2))[0]
self.nobs = self._get_nobs()
self.data_label = self._get_data_label()
self.time_stamp = self._get_time_stamp()
# descriptors
if self.format_version > 108:
typlist = [ord(self.path_or_buf.read(1))
for i in range(self.nvar)]
else:
buf = self.path_or_buf.read(self.nvar)
typlistb = np.frombuffer(buf, dtype=np.uint8)
typlist = []
for tp in typlistb:
if tp in self.OLD_TYPE_MAPPING:
typlist.append(self.OLD_TYPE_MAPPING[tp])
else:
typlist.append(tp - 127) # py2 string, py3 bytes
try:
self.typlist = [self.TYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata types [{0}]"
.format(','.join(str(x) for x in typlist)))
try:
self.dtyplist = [self.DTYPE_MAP[typ] for typ in typlist]
except:
raise ValueError("cannot convert stata dtypes [{0}]"
.format(','.join(str(x) for x in typlist)))
if self.format_version > 108:
self.varlist = [self._null_terminate(self.path_or_buf.read(33))
for i in range(self.nvar)]
else:
self.varlist = [self._null_terminate(self.path_or_buf.read(9))
for i in range(self.nvar)]
self.srtlist = struct.unpack(
self.byteorder + ('h' * (self.nvar + 1)),
self.path_or_buf.read(2 * (self.nvar + 1))
)[:-1]
self.fmtlist = self._get_fmtlist()
self.lbllist = self._get_lbllist()
self._variable_labels = self._get_variable_labels()
# ignore expansion fields (Format 105 and later)
# When reading, read five bytes; the last four bytes now tell you
# the size of the next read, which you discard. You then continue
# like this until you read 5 bytes of zeros.
if self.format_version > 104:
while True:
data_type = struct.unpack(self.byteorder + 'b',
self.path_or_buf.read(1))[0]
if self.format_version > 108:
data_len = struct.unpack(self.byteorder + 'i',
self.path_or_buf.read(4))[0]
else:
data_len = struct.unpack(self.byteorder + 'h',
self.path_or_buf.read(2))[0]
if data_type == 0:
break
self.path_or_buf.read(data_len)
# necessary data to continue parsing
self.data_location = self.path_or_buf.tell()
def _calcsize(self, fmt):
return (type(fmt) is int and fmt or
struct.calcsize(self.byteorder + fmt))
def _decode(self, s):
s = s.partition(b"\0")[0]
return s.decode('utf-8')
def _null_terminate(self, s):
if compat.PY3 or self._encoding is not None:
# have bytes not strings, so must decode
s = s.partition(b"\0")[0]
return s.decode(self._encoding or self._default_encoding)
else:
null_byte = "\0"
try:
return s.lstrip(null_byte)[:s.index(null_byte)]
except:
return s
def _read_value_labels(self):
if self.format_version <= 108:
# Value labels are not supported in version 108 and earlier.
return
if self._value_labels_read:
# Don't read twice
return
if self.format_version >= 117:
self.path_or_buf.seek(self.seek_value_labels)
else:
offset = self.nobs * self._dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
self._value_labels_read = True
self.value_label_dict = dict()
while True:
if self.format_version >= 117:
if self.path_or_buf.read(5) == b'</val': # <lbl>
break # end of value label table
slength = self.path_or_buf.read(4)
if not slength:
break # end of value label table (format < 117)
if self.format_version <= 117:
labname = self._null_terminate(self.path_or_buf.read(33))
else:
labname = self._decode(self.path_or_buf.read(129))
self.path_or_buf.read(3) # padding
n = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
txtlen = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
off = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
val = np.frombuffer(self.path_or_buf.read(4 * n),
dtype=self.byteorder + "i4",
count=n)
ii = np.argsort(off)
off = off[ii]
val = val[ii]
txt = self.path_or_buf.read(txtlen)
self.value_label_dict[labname] = dict()
for i in range(n):
end = off[i + 1] if i < n - 1 else txtlen
if self.format_version <= 117:
self.value_label_dict[labname][val[i]] = (
self._null_terminate(txt[off[i]:end]))
else:
self.value_label_dict[labname][val[i]] = (
self._decode(txt[off[i]:end]))
if self.format_version >= 117:
self.path_or_buf.read(6) # </lbl>
self._value_labels_read = True
def _read_strls(self):
self.path_or_buf.seek(self.seek_strls)
self.GSO = {0: ''}
while True:
if self.path_or_buf.read(3) != b'GSO':
break
if self.format_version == 117:
v_o = struct.unpack(self.byteorder + 'Q',
self.path_or_buf.read(8))[0]
else:
buf = self.path_or_buf.read(12)
# Only tested on little endian file on little endian machine.
if self.byteorder == '<':
buf = buf[0:2] + buf[4:10]
else:
buf = buf[0:2] + buf[6:]
v_o = struct.unpack('Q', buf)[0]
typ = struct.unpack('B', self.path_or_buf.read(1))[0]
length = struct.unpack(self.byteorder + 'I',
self.path_or_buf.read(4))[0]
va = self.path_or_buf.read(length)
if typ == 130:
encoding = 'utf-8'
if self.format_version == 117:
encoding = self._encoding or self._default_encoding
va = va[0:-1].decode(encoding)
self.GSO[v_o] = va
# legacy
@Appender('DEPRECATED: ' + _data_method_doc)
def data(self, **kwargs):
import warnings
warnings.warn("'data' is deprecated, use 'read' instead")
if self._data_read:
raise Exception("Data has already been read.")
self._data_read = True
return self.read(None, **kwargs)
def __next__(self):
return self.read(nrows=self._chunksize or 1)
def get_chunk(self, size=None):
"""
Reads lines from Stata file and returns as dataframe
Parameters
----------
size : int, defaults to None
Number of lines to read. If None, reads whole file.
Returns
-------
DataFrame
"""
if size is None:
size = self._chunksize
return self.read(nrows=size)
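    # Added usage sketch (hedged; 'example.dta' and do_something are
    # placeholders): the reader doubles as an iterator and context manager,
    # so large files can be processed chunk by chunk:
    #
    #   >>> with StataReader('example.dta', chunksize=10000) as rdr:
    #   ...     for chunk in rdr:            # each chunk is a DataFrame
    #   ...         do_something(chunk)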
@Appender(_read_method_doc)
def read(self, nrows=None, convert_dates=None,
convert_categoricals=None, index=None,
convert_missing=None, preserve_dtypes=None,
columns=None, order_categoricals=None):
# Handle empty file or chunk. If reading incrementally raise
# StopIteration. If reading the whole thing return an empty
# data frame.
if (self.nobs == 0) and (nrows is None):
self._can_read_value_labels = True
self._data_read = True
self.close()
return DataFrame(columns=self.varlist)
# Handle options
if convert_dates is None:
convert_dates = self._convert_dates
if convert_categoricals is None:
convert_categoricals = self._convert_categoricals
if convert_missing is None:
convert_missing = self._convert_missing
if preserve_dtypes is None:
preserve_dtypes = self._preserve_dtypes
if columns is None:
columns = self._columns
if order_categoricals is None:
order_categoricals = self._order_categoricals
if nrows is None:
nrows = self.nobs
if (self.format_version >= 117) and (self._dtype is None):
self._can_read_value_labels = True
self._read_strls()
# Setup the dtype.
if self._dtype is None:
dtype = [] # Convert struct data types to numpy data type
for i, typ in enumerate(self.typlist):
if typ in self.NUMPY_TYPE_MAP:
dtype.append(('s' + str(i), self.byteorder +
self.NUMPY_TYPE_MAP[typ]))
else:
dtype.append(('s' + str(i), 'S' + str(typ)))
dtype = np.dtype(dtype)
self._dtype = dtype
# Read data
dtype = self._dtype
max_read_len = (self.nobs - self._lines_read) * dtype.itemsize
read_len = nrows * dtype.itemsize
read_len = min(read_len, max_read_len)
if read_len <= 0:
# Iterator has finished, should never be here unless
# we are reading the file incrementally
if convert_categoricals:
self._read_value_labels()
self.close()
raise StopIteration
offset = self._lines_read * dtype.itemsize
self.path_or_buf.seek(self.data_location + offset)
read_lines = min(nrows, self.nobs - self._lines_read)
data = np.frombuffer(self.path_or_buf.read(read_len), dtype=dtype,
count=read_lines)
self._lines_read += read_lines
if self._lines_read == self.nobs:
self._can_read_value_labels = True
self._data_read = True
# if necessary, swap the byte order to native here
if self.byteorder != self._native_byteorder:
data = data.byteswap().newbyteorder()
if convert_categoricals:
self._read_value_labels()
if len(data) == 0:
data = DataFrame(columns=self.varlist, index=index)
else:
data = DataFrame.from_records(data, index=index)
data.columns = self.varlist
# If index is not specified, use actual row number rather than
# restarting at 0 for each chunk.
if index is None:
ix = np.arange(self._lines_read - read_lines, self._lines_read)
data = data.set_index(ix)
if columns is not None:
try:
data = self._do_select_columns(data, columns)
except ValueError:
self.close()
raise
# Decode strings
for col, typ in zip(data, self.typlist):
if type(typ) is int:
data[col] = data[col].apply(
self._null_terminate, convert_dtype=True)
data = self._insert_strls(data)
cols_ = np.where(self.dtyplist)[0]
# Convert columns (if needed) to match input type
index = data.index
requires_type_conversion = False
data_formatted = []
for i in cols_:
if self.dtyplist[i] is not None:
col = data.columns[i]
dtype = data[col].dtype
if dtype != np.dtype(object) and dtype != self.dtyplist[i]:
requires_type_conversion = True
data_formatted.append(
(col, Series(data[col], index, self.dtyplist[i])))
else:
data_formatted.append((col, data[col]))
if requires_type_conversion:
data = DataFrame.from_items(data_formatted)
del data_formatted
self._do_convert_missing(data, convert_missing)
if convert_dates:
cols = np.where(lmap(lambda x: x in _date_formats,
self.fmtlist))[0]
for i in cols:
col = data.columns[i]
try:
data[col] = _stata_elapsed_date_to_datetime_vec(
data[col],
self.fmtlist[i])
except ValueError:
self.close()
raise
if convert_categoricals and self.format_version > 108:
data = self._do_convert_categoricals(data,
self.value_label_dict,
self.lbllist,
order_categoricals)
if not preserve_dtypes:
retyped_data = []
convert = False
for col in data:
dtype = data[col].dtype
if dtype in (np.float16, np.float32):
dtype = np.float64
convert = True
elif dtype in (np.int8, np.int16, np.int32):
dtype = np.int64
convert = True
retyped_data.append((col, data[col].astype(dtype)))
if convert:
data = DataFrame.from_items(retyped_data)
return data
def _do_convert_missing(self, data, convert_missing):
# Check for missing values, and replace if found
for i, colname in enumerate(data):
fmt = self.typlist[i]
if fmt not in self.VALID_RANGE:
continue
nmin, nmax = self.VALID_RANGE[fmt]
series = data[colname]
missing = np.logical_or(series < nmin, series > nmax)
if not missing.any():
continue
if convert_missing: # Replacement follows Stata notation
missing_loc = np.argwhere(missing)
umissing, umissing_loc = np.unique(series[missing],
return_inverse=True)
replacement = Series(series, dtype=np.object)
for j, um in enumerate(umissing):
missing_value = StataMissingValue(um)
loc = missing_loc[umissing_loc == j]
replacement.iloc[loc] = missing_value
else: # All replacements are identical
dtype = series.dtype
if dtype not in (np.float32, np.float64):
dtype = np.float64
replacement = Series(series, dtype=dtype)
replacement[missing] = np.nan
data[colname] = replacement
def _insert_strls(self, data):
if not hasattr(self, 'GSO') or len(self.GSO) == 0:
return data
for i, typ in enumerate(self.typlist):
if typ != 'Q':
continue
data.iloc[:, i] = [self.GSO[k] for k in data.iloc[:, i]]
return data
def _do_select_columns(self, data, columns):
if not self._column_selector_set:
column_set = set(columns)
if len(column_set) != len(columns):
raise ValueError('columns contains duplicate entries')
unmatched = column_set.difference(data.columns)
if unmatched:
raise ValueError('The following columns were not found in the '
'Stata data set: ' +
', '.join(list(unmatched)))
# Copy information for retained columns for later processing
dtyplist = []
typlist = []
fmtlist = []
lbllist = []
for col in columns:
i = data.columns.get_loc(col)
dtyplist.append(self.dtyplist[i])
typlist.append(self.typlist[i])
fmtlist.append(self.fmtlist[i])
lbllist.append(self.lbllist[i])
self.dtyplist = dtyplist
self.typlist = typlist
self.fmtlist = fmtlist
self.lbllist = lbllist
self._column_selector_set = True
return data[columns]
def _do_convert_categoricals(self, data, value_label_dict, lbllist,
order_categoricals):
"""
Converts categorical columns to Categorical type.
"""
value_labels = list(compat.iterkeys(value_label_dict))
cat_converted_data = []
for col, label in zip(data, lbllist):
if label in value_labels:
# Explicit call with ordered=True
cat_data = Categorical(data[col], ordered=order_categoricals)
categories = []
for category in cat_data.categories:
if category in value_label_dict[label]:
categories.append(value_label_dict[label][category])
else:
categories.append(category) # Partially labeled
try:
cat_data.categories = categories
except ValueError:
vc = Series(categories).value_counts()
repeats = list(vc.index[vc > 1])
repeats = '\n' + '-' * 80 + '\n'.join(repeats)
msg = 'Value labels for column {0} are not unique. The ' \
'repeated labels are:\n{1}'.format(col, repeats)
raise ValueError(msg)
# TODO: is the next line needed above in the data(...) method?
cat_data = Series(cat_data, index=data.index)
cat_converted_data.append((col, cat_data))
else:
cat_converted_data.append((col, data[col]))
data = DataFrame.from_items(cat_converted_data)
return data
def data_label(self):
"""Returns data label of Stata file"""
return self.data_label
def variable_labels(self):
"""Returns variable labels as a dict, associating each variable name
with corresponding label
"""
return dict(zip(self.varlist, self._variable_labels))
def value_labels(self):
"""Returns a dict, associating each variable name a dict, associating
each value its corresponding label
"""
if not self._value_labels_read:
self._read_value_labels()
return self.value_label_dict
def _open_file_binary_write(fname, encoding):
if hasattr(fname, 'write'):
# if 'b' not in fname.mode:
return fname
return open(fname, "wb")
def _set_endianness(endianness):
if endianness.lower() in ["<", "little"]:
return "<"
elif endianness.lower() in [">", "big"]:
return ">"
else: # pragma : no cover
raise ValueError("Endianness %s not understood" % endianness)
def _pad_bytes(name, length):
"""
    Takes a char string and pads it with null bytes until it is `length` characters long
"""
return name + "\x00" * (length - len(name))
def _convert_datetime_to_stata_type(fmt):
"""
Converts from one of the stata date formats to a type in TYPE_MAP
"""
if fmt in ["tc", "%tc", "td", "%td", "tw", "%tw", "tm", "%tm", "tq",
"%tq", "th", "%th", "ty", "%ty"]:
return np.float64 # Stata expects doubles for SIFs
else:
raise NotImplementedError("Format %s not implemented" % fmt)
def _maybe_convert_to_int_keys(convert_dates, varlist):
new_dict = {}
for key in convert_dates:
if not convert_dates[key].startswith("%"): # make sure proper fmts
convert_dates[key] = "%" + convert_dates[key]
if key in varlist:
new_dict.update({varlist.index(key): convert_dates[key]})
else:
if not isinstance(key, int):
raise ValueError("convert_dates key must be a "
"column or an integer")
new_dict.update({key: convert_dates[key]})
return new_dict
def _dtype_to_stata_type(dtype, column):
"""
    Converts dtype types to stata types, returning the type byte (chr of the ordinal).
See TYPE_MAP and comments for an explanation. This is also explained in
the dta spec.
1 - 244 are strings of this length
Pandas Stata
251 - chr(251) - for int8 byte
252 - chr(252) - for int16 int
253 - chr(253) - for int32 long
254 - chr(254) - for float32 float
255 - chr(255) - for double double
If there are dates to convert, then dtype will already have the correct
type inserted.
"""
# TODO: expand to handle datetime to integer conversion
if dtype.type == np.string_:
return chr(dtype.itemsize)
elif dtype.type == np.object_: # try to coerce it to the biggest string
# not memory efficient, what else could we
# do?
itemsize = max_len_string_array(_ensure_object(column.values))
return chr(max(itemsize, 1))
elif dtype == np.float64:
return chr(255)
elif dtype == np.float32:
return chr(254)
elif dtype == np.int32:
return chr(253)
elif dtype == np.int16:
return chr(252)
elif dtype == np.int8:
return chr(251)
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
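# Added illustration (hedged; not in the original source): numeric dtypes map
# to the type bytes listed in the docstring above, e.g. float64 -> 255 and
# int8 -> 251:
#
#   >>> ord(_dtype_to_stata_type(np.dtype('float64'), Series([1.5])))
#   255
#   >>> ord(_dtype_to_stata_type(np.dtype('int8'), Series([1], dtype=np.int8)))
#   251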
def _dtype_to_default_stata_fmt(dtype, column):
"""
Maps numpy dtype to stata's default format for this type. Not terribly
important since users can change this in Stata. Semantics are
object -> "%DDs" where DD is the length of the string. If not a string,
raise ValueError
float64 -> "%10.0g"
float32 -> "%9.0g"
int64 -> "%9.0g"
int32 -> "%12.0g"
int16 -> "%8.0g"
int8 -> "%8.0g"
"""
# TODO: Refactor to combine type with format
# TODO: expand this to handle a default datetime format?
if dtype.type == np.object_:
inferred_dtype = infer_dtype(column.dropna())
if not (inferred_dtype in ('string', 'unicode') or
len(column) == 0):
raise ValueError('Writing general object arrays is not supported')
itemsize = max_len_string_array(_ensure_object(column.values))
if itemsize > 244:
raise ValueError(excessive_string_length_error % column.name)
return "%" + str(max(itemsize, 1)) + "s"
elif dtype == np.float64:
return "%10.0g"
elif dtype == np.float32:
return "%9.0g"
elif dtype == np.int32:
return "%12.0g"
elif dtype == np.int8 or dtype == np.int16:
return "%8.0g"
else: # pragma : no cover
raise NotImplementedError("Data type %s not supported." % dtype)
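# Added illustration (hedged): object columns are sized to the longest string,
# while numeric dtypes use the fixed formats listed above:
#
#   >>> _dtype_to_default_stata_fmt(np.dtype(object), Series(['ab', 'abcd']))
#   '%4s'
#   >>> _dtype_to_default_stata_fmt(np.dtype('float32'), Series([1.0]))
#   '%9.0g'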
class StataWriter(StataParser):
"""
A class for writing Stata binary dta files
Parameters
----------
fname : str or buffer
String path of file-like object
data : DataFrame
Input to save
convert_dates : dict
Dictionary mapping columns containing datetime types to stata internal
        format to use when writing the dates. Options are 'tc', 'td', 'tm',
'tw', 'th', 'tq', 'ty'. Column can be either an integer or a name.
Datetime columns that do not have a conversion type specified will be
converted to 'tc'. Raises NotImplementedError if a datetime column has
timezone information
write_index : bool
Write the index to Stata dataset.
encoding : str
Default is latin-1. Unicode is not supported
byteorder : str
Can be ">", "<", "little", or "big". default is `sys.byteorder`
time_stamp : datetime
A datetime to use as file creation date. Default is the current time
dataset_label : str
A label for the data set. Must be 80 characters or smaller.
variable_labels : dict
Dictionary containing columns as keys and variable labels as values.
Each label must be 80 characters or smaller.
.. versionadded:: 0.19.0
Returns
-------
writer : StataWriter instance
The StataWriter instance has a write_file method, which will
write the file to the given `fname`.
Raises
------
NotImplementedError
* If datetimes contain timezone information
ValueError
        * Columns listed in convert_dates are neither datetime64[ns]
          nor datetime.datetime
* Column dtype is not representable in Stata
* Column listed in convert_dates is not in DataFrame
* Categorical label contains more than 32,000 characters
Examples
--------
>>> import pandas as pd
>>> data = pd.DataFrame([[1.0, 1]], columns=['a', 'b'])
>>> writer = StataWriter('./data_file.dta', data)
>>> writer.write_file()
Or with dates
>>> from datetime import datetime
>>> data = pd.DataFrame([[datetime(2000,1,1)]], columns=['date'])
>>> writer = StataWriter('./date_data_file.dta', data, {'date' : 'tw'})
>>> writer.write_file()
"""
def __init__(self, fname, data, convert_dates=None, write_index=True,
encoding="latin-1", byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
super(StataWriter, self).__init__(encoding)
self._convert_dates = {} if convert_dates is None else convert_dates
self._write_index = write_index
self._time_stamp = time_stamp
self._data_label = data_label
self._variable_labels = variable_labels
# attach nobs, nvars, data, varlist, typlist
self._prepare_pandas(data)
if byteorder is None:
byteorder = sys.byteorder
self._byteorder = _set_endianness(byteorder)
self._fname = fname
self.type_converters = {253: np.int32, 252: np.int16, 251: np.int8}
def _write(self, to_write):
"""
Helper to call encode before writing to file for Python 3 compat.
"""
if compat.PY3:
self._file.write(to_write.encode(self._encoding or
self._default_encoding))
else:
self._file.write(to_write)
def _prepare_categoricals(self, data):
"""Check for categorical columns, retain categorical information for
Stata file and convert categorical data to int"""
is_cat = [is_categorical_dtype(data[col]) for col in data]
self._is_col_cat = is_cat
self._value_labels = []
if not any(is_cat):
return data
get_base_missing_value = StataMissingValue.get_base_missing_value
index = data.index
data_formatted = []
for col, col_is_cat in zip(data, is_cat):
if col_is_cat:
self._value_labels.append(StataValueLabel(data[col]))
dtype = data[col].cat.codes.dtype
if dtype == np.int64:
raise ValueError('It is not possible to export '
'int64-based categorical data to Stata.')
values = data[col].cat.codes.values.copy()
# Upcast if needed so that correct missing values can be set
if values.max() >= get_base_missing_value(dtype):
if dtype == np.int8:
dtype = np.int16
elif dtype == np.int16:
dtype = np.int32
else:
dtype = np.float64
values = np.array(values, dtype=dtype)
# Replace missing values with Stata missing value for type
values[values == -1] = get_base_missing_value(dtype)
data_formatted.append((col, values, index))
else:
data_formatted.append((col, data[col]))
return DataFrame.from_items(data_formatted)
def _replace_nans(self, data):
# return data
"""Checks floating point data columns for nans, and replaces these with
        the generic Stata missing value (.)"""
for c in data:
dtype = data[c].dtype
if dtype in (np.float32, np.float64):
if dtype == np.float32:
replacement = self.MISSING_VALUES['f']
else:
replacement = self.MISSING_VALUES['d']
data[c] = data[c].fillna(replacement)
return data
def _check_column_names(self, data):
"""
Checks column names to ensure that they are valid Stata column names.
This includes checks for:
* Non-string names
* Stata keywords
* Variables that start with numbers
* Variables with names that are too long
When an illegal variable name is detected, it is converted, and if
dates are exported, the variable name is propagated to the date
conversion dictionary
"""
converted_names = []
columns = list(data.columns)
original_columns = columns[:]
duplicate_var_id = 0
for j, name in enumerate(columns):
orig_name = name
if not isinstance(name, string_types):
name = text_type(name)
for c in name:
if (c < 'A' or c > 'Z') and (c < 'a' or c > 'z') and \
(c < '0' or c > '9') and c != '_':
name = name.replace(c, '_')
# Variable name must not be a reserved word
if name in self.RESERVED_WORDS:
name = '_' + name
# Variable name may not start with a number
if name[0] >= '0' and name[0] <= '9':
name = '_' + name
name = name[:min(len(name), 32)]
if not name == orig_name:
# check for duplicates
while columns.count(name) > 0:
# prepend ascending number to avoid duplicates
name = '_' + str(duplicate_var_id) + name
name = name[:min(len(name), 32)]
duplicate_var_id += 1
# need to possibly encode the orig name if its unicode
try:
orig_name = orig_name.encode('utf-8')
except:
pass
converted_names.append(
'{0} -> {1}'.format(orig_name, name))
columns[j] = name
data.columns = columns
# Check date conversion, and fix key if needed
if self._convert_dates:
for c, o in zip(columns, original_columns):
if c != o:
self._convert_dates[c] = self._convert_dates[o]
del self._convert_dates[o]
if converted_names:
import warnings
ws = invalid_name_doc.format('\n '.join(converted_names))
warnings.warn(ws, InvalidColumnName)
return data
def _prepare_pandas(self, data):
# NOTE: we might need a different API / class for pandas objects so
# we can set different semantics - handle this with a PR to pandas.io
data = data.copy()
if self._write_index:
data = data.reset_index()
# Ensure column names are strings
data = self._check_column_names(data)
# Check columns for compatibility with stata, upcast if necessary
# Raise if outside the supported range
data = _cast_to_stata_types(data)
# Replace NaNs with Stata missing values
data = self._replace_nans(data)
# Convert categoricals to int data, and strip labels
data = self._prepare_categoricals(data)
self.nobs, self.nvar = data.shape
self.data = data
self.varlist = data.columns.tolist()
dtypes = data.dtypes
# Ensure all date columns are converted
for col in data:
if col in self._convert_dates:
continue
if is_datetime64_dtype(data[col]):
self._convert_dates[col] = 'tc'
self._convert_dates = _maybe_convert_to_int_keys(self._convert_dates,
self.varlist)
for key in self._convert_dates:
new_type = _convert_datetime_to_stata_type(
self._convert_dates[key]
)
dtypes[key] = np.dtype(new_type)
self.typlist = []
self.fmtlist = []
for col, dtype in dtypes.iteritems():
self.fmtlist.append(_dtype_to_default_stata_fmt(dtype, data[col]))
self.typlist.append(_dtype_to_stata_type(dtype, data[col]))
# set the given format for the datetime cols
if self._convert_dates is not None:
for key in self._convert_dates:
self.fmtlist[key] = self._convert_dates[key]
def write_file(self):
self._file = _open_file_binary_write(
self._fname, self._encoding or self._default_encoding
)
try:
self._write_header(time_stamp=self._time_stamp,
data_label=self._data_label)
self._write_descriptors()
self._write_variable_labels()
# write 5 zeros for expansion fields
self._write(_pad_bytes("", 5))
self._prepare_data()
self._write_data()
self._write_value_labels()
finally:
self._file.close()
def _write_value_labels(self):
for vl in self._value_labels:
self._file.write(vl.generate_value_label(self._byteorder,
self._encoding))
def _write_header(self, data_label=None, time_stamp=None):
byteorder = self._byteorder
# ds_format - just use 114
self._file.write(struct.pack("b", 114))
# byteorder
self._write(byteorder == ">" and "\x01" or "\x02")
# filetype
self._write("\x01")
# unused
self._write("\x00")
# number of vars, 2 bytes
self._file.write(struct.pack(byteorder + "h", self.nvar)[:2])
# number of obs, 4 bytes
self._file.write(struct.pack(byteorder + "i", self.nobs)[:4])
# data label 81 bytes, char, null terminated
if data_label is None:
self._file.write(self._null_terminate(_pad_bytes("", 80)))
else:
self._file.write(
self._null_terminate(_pad_bytes(data_label[:80], 80))
)
# time stamp, 18 bytes, char, null terminated
# format dd Mon yyyy hh:mm
if time_stamp is None:
time_stamp = datetime.datetime.now()
elif not isinstance(time_stamp, datetime.datetime):
raise ValueError("time_stamp should be datetime type")
self._file.write(
self._null_terminate(time_stamp.strftime("%d %b %Y %H:%M"))
)
def _write_descriptors(self, typlist=None, varlist=None, srtlist=None,
fmtlist=None, lbllist=None):
nvar = self.nvar
# typlist, length nvar, format byte array
for typ in self.typlist:
self._write(typ)
# varlist names are checked by _check_column_names
# varlist, requires null terminated
for name in self.varlist:
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
# srtlist, 2*(nvar+1), int array, encoded by byteorder
srtlist = _pad_bytes("", 2 * (nvar + 1))
self._write(srtlist)
# fmtlist, 49*nvar, char array
for fmt in self.fmtlist:
self._write(_pad_bytes(fmt, 49))
# lbllist, 33*nvar, char array
for i in range(nvar):
# Use variable name when categorical
if self._is_col_cat[i]:
name = self.varlist[i]
name = self._null_terminate(name, True)
name = _pad_bytes(name[:32], 33)
self._write(name)
else: # Default is empty label
self._write(_pad_bytes("", 33))
def _write_variable_labels(self):
# Missing labels are 80 blank characters plus null termination
blank = _pad_bytes('', 81)
if self._variable_labels is None:
for i in range(self.nvar):
self._write(blank)
return
for col in self.data:
if col in self._variable_labels:
label = self._variable_labels[col]
if len(label) > 80:
raise ValueError('Variable labels must be 80 characters '
'or fewer')
is_latin1 = all(ord(c) < 256 for c in label)
if not is_latin1:
raise ValueError('Variable labels must contain only '
'characters that can be encoded in '
'Latin-1')
self._write(_pad_bytes(label, 81))
else:
self._write(blank)
def _prepare_data(self):
data = self.data
typlist = self.typlist
convert_dates = self._convert_dates
# 1. Convert dates
if self._convert_dates is not None:
for i, col in enumerate(data):
if i in convert_dates:
data[col] = _datetime_to_stata_elapsed_vec(data[col],
self.fmtlist[i])
# 2. Convert bad string data to '' and pad to correct length
dtype = []
data_cols = []
has_strings = False
for i, col in enumerate(data):
typ = ord(typlist[i])
if typ <= 244:
has_strings = True
data[col] = data[col].fillna('').apply(_pad_bytes, args=(typ,))
stype = 'S%d' % typ
dtype.append(('c' + str(i), stype))
string = data[col].str.encode(self._encoding)
data_cols.append(string.values.astype(stype))
else:
dtype.append(('c' + str(i), data[col].dtype))
data_cols.append(data[col].values)
dtype = np.dtype(dtype)
if has_strings:
self.data = np.fromiter(zip(*data_cols), dtype=dtype)
else:
self.data = data.to_records(index=False)
def _write_data(self):
data = self.data
data.tofile(self._file)
def _null_terminate(self, s, as_string=False):
null_byte = '\x00'
if compat.PY3 and not as_string:
s += null_byte
return s.encode(self._encoding)
else:
s += null_byte
return s
| gpl-3.0 |
centic9/subversion-ppa | tools/dev/graph-dav-servers.py | 5 | 5465 | #!/usr/bin/env python
#
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#
#
# graph-svn-dav.py by Brian W. Fitzpatrick <fitz@red-bean.com>
#
# This was originally a quick hack to make a pretty picture of svn DAV servers.
#
# I've dropped it in Subversion's repository at the request of Karl Fogel.
#
# Be warned that this script has many dependencies that don't ship with Python.
import sys
import os
import fileinput
import datetime
import time
import datetime
from matplotlib import dates
import matplotlib
matplotlib.use('Agg')
from matplotlib import pylab
import Image
OUTPUT_FILE = '../../www/images/svn-dav-securityspace-survey.png'
OUTPUT_IMAGE_WIDTH = 800
STATS = [
('1/1/2003', 70),
('2/1/2003', 158),
('3/1/2003', 222),
('4/1/2003', 250),
('5/1/2003', 308),
('6/1/2003', 369),
('7/1/2003', 448),
('8/1/2003', 522),
('9/1/2003', 665),
('10/1/2003', 782),
('11/1/2003', 969),
('12/1/2003', 1009),
('1/1/2004', 1162),
('2/1/2004', 1307),
('3/1/2004', 1424),
('4/1/2004', 1792),
('5/1/2004', 2113),
('6/1/2004', 2502),
('7/1/2004', 2941),
('8/1/2004', 3863),
('9/1/2004', 4174),
('10/1/2004', 4187),
('11/1/2004', 4783),
('12/1/2004', 4995),
('1/1/2005', 5565),
('2/1/2005', 6505),
('3/1/2005', 7897),
('4/1/2005', 8751),
('5/1/2005', 9793),
('6/1/2005', 11534),
('7/1/2005', 12808),
('8/1/2005', 13545),
('9/1/2005', 15233),
('10/1/2005', 17588),
('11/1/2005', 18893),
('12/1/2005', 20278),
('1/1/2006', 21084),
('2/1/2006', 23861),
('3/1/2006', 26540),
('4/1/2006', 29396),
('5/1/2006', 33001),
('6/1/2006', 35082),
('7/1/2006', 38939),
('8/1/2006', 40672),
('9/1/2006', 46525),
('10/1/2006', 54247),
('11/1/2006', 63145),
('12/1/2006', 68988),
('1/1/2007', 77027),
('2/1/2007', 84813),
('3/1/2007', 95679),
('4/1/2007', 103852),
('5/1/2007', 117267),
('6/1/2007', 133665),
('7/1/2007', 137575),
('8/1/2007', 155426),
('9/1/2007', 159055),
('10/1/2007', 169939),
('11/1/2007', 180831),
('12/1/2007', 187093),
('1/1/2008', 199432),
('2/1/2008', 221547),
('3/1/2008', 240794),
('4/1/2008', 255520),
('5/1/2008', 269478),
('6/1/2008', 286614),
('7/1/2008', 294579),
('8/1/2008', 307923),
('9/1/2008', 254757),
('10/1/2008', 268081),
('11/1/2008', 299071),
('12/1/2008', 330884),
('1/1/2009', 369719),
('2/1/2009', 378434),
('3/1/2009', 390502),
('4/1/2009', 408658),
('5/1/2009', 407044),
('6/1/2009', 406520),
('7/1/2009', 334276),
]
def get_date(raw_date):
month, day, year = map(int, raw_date.split('/'))
return datetime.datetime(year, month, day)
def get_ordinal_date(date):
# This is the only way I can get matplotlib to do the dates right.
return int(dates.date2num(get_date(date)))
def load_stats():
dates = [get_ordinal_date(date) for date, value in STATS]
counts = [x[1] for x in STATS]
return dates, counts
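# Added note (hedged; not in the original script): load_stats() returns two
# parallel lists -- matplotlib ordinal dates and server counts -- so the first
# entry corresponds to 2003-01-01 with 70 servers:
#
#   >>> dates, counts = load_stats()
#   >>> counts[0]
#   70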
def draw_graph(dates, counts):
###########################################################
# Drawing takes place here.
pylab.figure(1)
ax = pylab.subplot(111)
pylab.plot_date(dates, counts,
color='r', linestyle='-', marker='o', markersize=3)
ax.xaxis.set_major_formatter( pylab.DateFormatter('%Y') )
ax.xaxis.set_major_locator( pylab.YearLocator() )
ax.xaxis.set_minor_locator( pylab.MonthLocator() )
ax.set_xlim( (dates[0] - 92, dates[len(dates) - 1] + 92) )
ax.yaxis.set_major_formatter( pylab.FormatStrFormatter('%d') )
pylab.ylabel('Total # of Public DAV Servers')
lastdate = datetime.datetime.fromordinal(dates[len(dates) - 1]).strftime("%B %Y")
pylab.xlabel("Data as of " + lastdate)
pylab.title('Security Space Survey of\nPublic Subversion DAV Servers')
# End drawing
###########################################################
png = open(OUTPUT_FILE, 'w')
pylab.savefig(png)
png.close()
os.rename(OUTPUT_FILE, OUTPUT_FILE + ".tmp.png")
try:
im = Image.open(OUTPUT_FILE + ".tmp.png", 'r')
(width, height) = im.size
print("Original size: %d x %d pixels" % (width, height))
scale = float(OUTPUT_IMAGE_WIDTH) / float(width)
width = OUTPUT_IMAGE_WIDTH
height = int(float(height) * scale)
print("Final size: %d x %d pixels" % (width, height))
im = im.resize((width, height), Image.ANTIALIAS)
im.save(OUTPUT_FILE, im.format)
os.unlink(OUTPUT_FILE + ".tmp.png")
except Exception, e:
sys.stderr.write("Error attempting to resize the graphic: %s\n" % (str(e)))
os.rename(OUTPUT_FILE + ".tmp.png", OUTPUT_FILE)
raise
pylab.close()
if __name__ == '__main__':
dates, counts = load_stats()
draw_graph(dates, counts)
print("Don't forget to update ../../www/svn-dav-securityspace-survey.html!")
| apache-2.0 |
VirusTotal/msticpy | tests/test_ip_utils.py | 1 | 3873 | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
"""IP Utils test class."""
import unittest
import json
import os
import pandas as pd
from msticpy.sectools.ip_utils import get_whois_info, get_whois_df, get_ip_type
_test_data_folders = [
d for d, _, _ in os.walk(os.getcwd()) if d.endswith("/tests/testdata")
]
if len(_test_data_folders) == 1:
_TEST_DATA = _test_data_folders[0]
else:
_TEST_DATA = "./tests/testdata"
class TestIpUtils(unittest.TestCase):
"""Unit test class."""
IPV4 = {
"Private": ("10.0.0.1", ["Private", "Reserved"]),
"Multicast": ("224.0.0.1", None),
"Unspecified": ("0.0.0.0", None),
"Reserved": ("198.51.100.1", ["Private", "Reserved"]),
"Loopback": ("127.0.0.1", None),
"Public": ("153.2.3.4", None),
"Link Local": ("169.254.0.1", None),
}
IPV6 = {
"Private": ("FC00::C001:1DFF:FEE0:0", None),
"Multicast": ("FF00::", None),
"Unspecified": ("::", None),
"Reserved": ("2001:db8::", ["Private", "Reserved"]),
"Loopback": ("::1", None),
"Public": ("2340:0023:AABA:0A01:0055:5054:9ABC:ABB0", None),
"Link Local": ("FE80::C001:1DFF:FEE0:0", None),
}
def setUp(self):
input_file = os.path.join(_TEST_DATA, "az_net_flows.csv")
self.input_df = pd.read_csv(input_file).sample(10)
def test_get_ip_type(self):
for ip_type, (addr, alts) in self.IPV4.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
for ip_type, (addr, alts) in self.IPV6.items():
print(addr, ip_type)
if alts:
self.assertIn(get_ip_type(addr), alts)
else:
self.assertEqual(get_ip_type(addr), ip_type)
def test_get_whois(self):
ms_ip = "13.107.4.50"
ms_asn = "MICROSOFT-CORP-MSN-AS-BLOCK, US"
asn, whois = get_whois_info(ms_ip)
self.assertEqual(asn, ms_asn)
asn, whois = get_whois_info(self.IPV4["Private"][0])
invalid_type = "No ASN Information for IP type: Private"
self.assertEqual(asn, invalid_type)
def test_get_whois_df(self):
results = get_whois_df(data=self.input_df, ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = get_whois_df(
data=self.input_df, ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
def test_whois_pdext(self):
results = self.input_df.mp_whois.lookup(ip_column="AllExtIPs")
self.assertEqual(len(results), len(self.input_df))
self.assertIn("AsnDescription", results.columns)
results2 = self.input_df.mp_whois.lookup(
ip_column="AllExtIPs", asn_col="asn", whois_col="whois"
)
self.assertEqual(len(results2), len(self.input_df))
self.assertIn("asn", results2.columns)
self.assertIn("whois", results2.columns)
self.assertEqual(len(results2[~results2["asn"].isna()]), len(self.input_df))
self.assertEqual(len(results2[~results2["whois"].isna()]), len(self.input_df))
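def _example_get_ip_type():
    # Minimal illustrative sketch, not part of the original test suite. It
    # relies only on behaviour exercised by the tests above: get_ip_type()
    # returns a type string, and some private ranges may be reported as
    # either "Private" or "Reserved" depending on the library version.
    for addr, accepted in (("10.0.0.1", ("Private", "Reserved")),
                           ("127.0.0.1", ("Loopback",))):
        assert get_ip_type(addr) in accepted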
| mit |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/pandas/io/tests/test_pickle.py | 7 | 10831 | # pylint: disable=E1101,E1103,W0232
""" manage legacy pickle tests """
import nose
import os
from distutils.version import LooseVersion
import pandas as pd
from pandas import Index
from pandas.compat import u, is_platform_little_endian
import pandas
import pandas.util.testing as tm
from pandas.tseries.offsets import Day, MonthEnd
class TestPickle():
"""
How to add pickle tests:
1. Install pandas version intended to output the pickle.
2. Execute "generate_legacy_storage_files.py" to create the pickle.
$ python generate_legacy_storage_files.py <output_dir> pickle
3. Move the created pickle to "data/legacy_pickle/<version>" directory.
NOTE: TestPickle can't be a subclass of tm.Testcase to use test generator.
http://stackoverflow.com/questions/6689537/
nose-test-generators-inside-class
"""
_multiprocess_can_split_ = True
def setUp(self):
from pandas.io.tests.generate_legacy_storage_files import (
create_pickle_data)
self.data = create_pickle_data()
self.path = u('__%s__.pickle' % tm.rands(10))
def compare_element(self, result, expected, typ, version=None):
if isinstance(expected, Index):
tm.assert_index_equal(expected, result)
return
if typ.startswith('sp_'):
comparator = getattr(tm, "assert_%s_equal" % typ)
comparator(result, expected, exact_indices=False)
elif typ == 'timestamp':
if expected is pd.NaT:
assert result is pd.NaT
else:
tm.assert_equal(result, expected)
tm.assert_equal(result.freq, expected.freq)
else:
comparator = getattr(tm, "assert_%s_equal" %
typ, tm.assert_almost_equal)
comparator(result, expected)
def compare(self, vf, version):
# py3 compat when reading py2 pickle
try:
data = pandas.read_pickle(vf)
except (ValueError) as e:
if 'unsupported pickle protocol:' in str(e):
# trying to read a py3 pickle in py2
return
else:
raise
for typ, dv in data.items():
for dt, result in dv.items():
try:
expected = self.data[typ][dt]
except (KeyError):
if version in ('0.10.1', '0.11.0') and dt == 'reg':
break
else:
raise
# use a specific comparator
# if available
comparator = "compare_{typ}_{dt}".format(typ=typ, dt=dt)
comparator = getattr(self, comparator, self.compare_element)
comparator(result, expected, typ, version)
return data
def compare_sp_series_ts(self, res, exp, typ, version):
# SparseTimeSeries integrated into SparseSeries in 0.12.0
# and deprecated in 0.17.0
if version and LooseVersion(version) <= "0.12.0":
tm.assert_sp_series_equal(res, exp, check_series_type=False)
else:
tm.assert_sp_series_equal(res, exp)
def compare_series_ts(self, result, expected, typ, version):
# GH 7748
tm.assert_series_equal(result, expected)
tm.assert_equal(result.index.freq, expected.index.freq)
tm.assert_equal(result.index.freq.normalize, False)
tm.assert_series_equal(result > 0, expected > 0)
# GH 9291
freq = result.index.freq
tm.assert_equal(freq + Day(1), Day(2))
res = freq + pandas.Timedelta(hours=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, hours=1))
res = freq + pandas.Timedelta(nanoseconds=1)
tm.assert_equal(isinstance(res, pandas.Timedelta), True)
tm.assert_equal(res, pandas.Timedelta(days=1, nanoseconds=1))
def compare_series_dt_tz(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_series_equal(result, expected)
else:
tm.assert_series_equal(result, expected)
def compare_series_cat(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_series_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_series_equal(result, expected, check_categorical=False)
else:
tm.assert_series_equal(result, expected)
def compare_frame_dt_mixed_tzs(self, result, expected, typ, version):
# 8260
# dtype is object < 0.17.0
if LooseVersion(version) < '0.17.0':
expected = expected.astype(object)
tm.assert_frame_equal(result, expected)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_onecol(self, result, expected, typ, version):
# Categorical dtype is added in 0.15.0
# ordered is changed in 0.16.0
if LooseVersion(version) < '0.15.0':
tm.assert_frame_equal(result, expected, check_dtype=False,
check_categorical=False)
elif LooseVersion(version) < '0.16.0':
tm.assert_frame_equal(result, expected, check_categorical=False)
else:
tm.assert_frame_equal(result, expected)
def compare_frame_cat_and_float(self, result, expected, typ, version):
self.compare_frame_cat_onecol(result, expected, typ, version)
def compare_index_period(self, result, expected, typ, version):
tm.assert_index_equal(result, expected)
tm.assertIsInstance(result.freq, MonthEnd)
tm.assert_equal(result.freq, MonthEnd())
tm.assert_equal(result.freqstr, 'M')
tm.assert_index_equal(result.shift(2), expected.shift(2))
def compare_sp_frame_float(self, result, expected, typ, version):
if LooseVersion(version) <= '0.18.1':
tm.assert_sp_frame_equal(result, expected, exact_indices=False,
check_dtype=False)
else:
tm.assert_sp_frame_equal(result, expected)
def read_pickles(self, version):
if not is_platform_little_endian():
raise nose.SkipTest("known failure on non-little endian")
pth = tm.get_data_path('legacy_pickle/{0}'.format(str(version)))
n = 0
for f in os.listdir(pth):
vf = os.path.join(pth, f)
data = self.compare(vf, version)
if data is None:
continue
n += 1
assert n > 0, 'Pickle files are not tested'
def test_pickles(self):
pickle_path = tm.get_data_path('legacy_pickle')
n = 0
for v in os.listdir(pickle_path):
pth = os.path.join(pickle_path, v)
if os.path.isdir(pth):
yield self.read_pickles, v
n += 1
assert n > 0, 'Pickle files are not tested'
def test_round_trip_current(self):
try:
import cPickle as c_pickle
def c_pickler(obj, path):
with open(path, 'wb') as fh:
c_pickle.dump(obj, fh, protocol=-1)
def c_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return c_pickle.load(fh)
        except ImportError:
c_pickler = None
c_unpickler = None
import pickle as python_pickle
def python_pickler(obj, path):
with open(path, 'wb') as fh:
python_pickle.dump(obj, fh, protocol=-1)
def python_unpickler(path):
with open(path, 'rb') as fh:
fh.seek(0)
return python_pickle.load(fh)
for typ, dv in self.data.items():
for dt, expected in dv.items():
for writer in [pd.to_pickle, c_pickler, python_pickler]:
if writer is None:
continue
with tm.ensure_clean(self.path) as path:
# test writing with each pickler
writer(expected, path)
# test reading with each unpickler
result = pd.read_pickle(path)
self.compare_element(result, expected, typ)
if c_unpickler is not None:
result = c_unpickler(path)
self.compare_element(result, expected, typ)
result = python_unpickler(path)
self.compare_element(result, expected, typ)
def test_pickle_v0_14_1(self):
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_14_1.pickle')
# This code was executed once on v0.14.1 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
def test_pickle_v0_15_2(self):
# ordered -> _ordered
# GH 9347
# we have the name warning
# 10482
with tm.assert_produces_warning(UserWarning):
cat = pd.Categorical(values=['a', 'b', 'c'],
categories=['a', 'b', 'c', 'd'],
name='foobar', ordered=False)
pickle_path = os.path.join(tm.get_data_path(),
'categorical_0_15_2.pickle')
# This code was executed once on v0.15.2 to generate the pickle:
#
# cat = Categorical(labels=np.arange(3), levels=['a', 'b', 'c', 'd'],
# name='foobar')
# with open(pickle_path, 'wb') as f: pickle.dump(cat, f)
#
tm.assert_categorical_equal(cat, pd.read_pickle(pickle_path))
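def _example_round_trip_pickle():
    # Minimal round-trip sketch, not part of the original test class: an
    # object written with pandas.to_pickle should come back unchanged from
    # pandas.read_pickle, which is the invariant the legacy-pickle tests
    # above check across pandas versions.
    df = pd.DataFrame({'a': [1, 2, 3], 'b': ['x', 'y', 'z']})
    with tm.ensure_clean('__round_trip__.pickle') as path:
        pd.to_pickle(df, path)
        tm.assert_frame_equal(df, pd.read_pickle(path))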
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| mit |
DigitalSlideArchive/HistomicsTK | histomicstk/features/compute_intensity_features.py | 1 | 5874 | """Compute intensity features in labeled image."""
import numpy as np
import pandas as pd
import scipy.stats
from skimage.measure import regionprops
def compute_intensity_features(
im_label, im_intensity, num_hist_bins=10,
rprops=None, feature_list=None):
"""Calculate intensity features from an intensity image.
Parameters
----------
im_label : array_like
A labeled mask image wherein intensity of a pixel is the ID of the
object it belongs to. Non-zero values are considered to be foreground
objects.
im_intensity : array_like
Intensity image.
num_hist_bins: int, optional
Number of bins used to computed the intensity histogram of an object.
Histogram is used to energy and entropy features. Default is 10.
rprops : output of skimage.measure.regionprops, optional
rprops = skimage.measure.regionprops( im_label ). If rprops is not
passed then it will be computed inside which will increase the
computation time.
feature_list : list, default is None
list of intensity features to return.
If none, all intensity features are returned.
Returns
-------
fdata: pandas.DataFrame
A pandas dataframe containing the intensity features listed below for
each object/label.
Notes
-----
List of intensity features computed by this function:
Intensity.Min : float
Minimum intensity of object pixels.
Intensity.Max : float
Maximum intensity of object pixels.
Intensity.Mean : float
Mean intensity of object pixels
Intensity.Median : float
Median intensity of object pixels
Intensity.MeanMedianDiff : float
Difference between mean and median intensities of object pixels.
Intensity.Std : float
Standard deviation of the intensities of object pixels
Intensity.IQR: float
Inter-quartile range of the intensities of object pixels
Intensity.MAD: float
Median absolute deviation of the intensities of object pixels
Intensity.Skewness : float
Skewness of the intensities of object pixels. Value is 0 when all
intensity values are equal.
Intensity.Kurtosis : float
Kurtosis of the intensities of object pixels. Value is -3 when all
values are equal.
Intensity.HistEnergy : float
Energy of the intensity histogram of object pixels
Intensity.HistEntropy : float
Entropy of the intensity histogram of object pixels.
References
----------
.. [#] Daniel Zwillinger and Stephen Kokoska. "CRC standard probability
and statistics tables and formulae," Crc Press, 1999.
"""
default_feature_list = [
'Intensity.Min',
'Intensity.Max',
'Intensity.Mean',
'Intensity.Median',
'Intensity.MeanMedianDiff',
'Intensity.Std',
'Intensity.IQR',
'Intensity.MAD',
'Intensity.Skewness',
'Intensity.Kurtosis',
'Intensity.HistEnergy',
'Intensity.HistEntropy',
]
# List of feature names
if feature_list is None:
feature_list = default_feature_list
else:
assert all(j in default_feature_list for j in feature_list), \
"Some feature names are not recognized."
# compute object properties if not provided
if rprops is None:
rprops = regionprops(im_label)
# create pandas data frame containing the features for each object
numFeatures = len(feature_list)
numLabels = len(rprops)
fdata = pd.DataFrame(np.zeros((numLabels, numFeatures)),
columns=feature_list)
# conditionally execute calculations if x in the features list
def _conditional_execution(feature, func, *args, **kwargs):
if feature in feature_list:
fdata.at[i, feature] = func(*args, **kwargs)
def _return_input(x):
return x
for i in range(numLabels):
# get intensities of object pixels
pixelIntensities = np.sort(
im_intensity[rprops[i].coords[:, 0], rprops[i].coords[:, 1]]
)
# simple descriptors
meanIntensity = np.mean(pixelIntensities)
medianIntensity = np.median(pixelIntensities)
_conditional_execution('Intensity.Min', np.min, pixelIntensities)
_conditional_execution('Intensity.Max', np.max, pixelIntensities)
_conditional_execution('Intensity.Mean', _return_input, meanIntensity)
_conditional_execution(
'Intensity.Median', _return_input, medianIntensity)
_conditional_execution(
'Intensity.MeanMedianDiff', _return_input,
meanIntensity - medianIntensity)
_conditional_execution('Intensity.Std', np.std, pixelIntensities)
_conditional_execution(
'Intensity.Skewness', scipy.stats.skew, pixelIntensities)
_conditional_execution(
'Intensity.Kurtosis', scipy.stats.kurtosis, pixelIntensities)
# inter-quartile range
_conditional_execution(
'Intensity.IQR', scipy.stats.iqr, pixelIntensities)
# median absolute deviation
_conditional_execution(
'Intensity.MAD', np.median,
np.abs(pixelIntensities - medianIntensity))
# histogram-based features
if any(j in feature_list for j in [
'Intensity.HistEntropy', 'Intensity.HistEnergy']):
# compute intensity histogram
hist, bins = np.histogram(pixelIntensities, bins=num_hist_bins)
prob = hist/np.sum(hist, dtype=np.float32)
# entropy and energy
_conditional_execution(
'Intensity.HistEntropy', scipy.stats.entropy, prob)
_conditional_execution('Intensity.HistEnergy', np.sum, prob**2)
return fdata
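def _example_compute_intensity_features():
    # Minimal usage sketch, not shipped with HistomicsTK: builds a tiny
    # synthetic labeled mask with two objects and computes a few of the
    # intensity features documented above. Object IDs and pixel values are
    # made up purely for illustration.
    im_label = np.zeros((4, 4), dtype=int)
    im_label[0:2, 0:2] = 1  # object 1: top-left 2x2 block
    im_label[2:4, 2:4] = 2  # object 2: bottom-right 2x2 block
    im_intensity = np.arange(16, dtype=float).reshape(4, 4)
    fdata = compute_intensity_features(
        im_label, im_intensity,
        feature_list=['Intensity.Min', 'Intensity.Max', 'Intensity.Mean'])
    return fdata  # one row per labeled object, one column per feature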
| apache-2.0 |
kgullikson88/IGRINS_Scripts | Search_Fast.py | 1 | 2751 | import sys
import os
import GenericSearch
import pandas
# Define regions contaminated by telluric residuals or other defects. We will not use those regions in the cross-correlation
badregions = [[0, 1510], # Blue end of H band (lots of water absorption)
#[1561, 1615], # CO2 band that is often poorly corrected (for now at least...)
[1740, 2090], #In between H and K bands (lots of water absorption)
[2348, 2500], #Red end of K band (lots of water absorption)
[1510, 1520], #Temporary...
[1688,1740],
[2313, 2350]]
if "darwin" in sys.platform:
modeldir = "/Volumes/DATADRIVE/Stellar_Models/Sorted/Stellar/NearIR/"
elif "linux" in sys.platform:
modeldir = "/media/FreeAgent_Drive/SyntheticSpectra/Sorted/Stellar/NearIR/"
else:
modeldir = raw_input("sys.platform not recognized. Please enter model directory below: ")
if not modeldir.endswith("/"):
modeldir = modeldir + "/"
def add_oh_lines(oh_file, badregions=[], minstrength=1.0, tol=0.05):
    oh_data = pandas.read_csv(oh_file, header=None, sep=" ", skipinitialspace=True, names=['wave', 'strength'])
oh = oh_data[oh_data['strength'] > minstrength]
n = 1.0 + 2.735182e-4 + 131.4182 / oh['wave'] ** 2 + 2.76249e8 / oh['wave'] ** 4
oh['wave'] = oh['wave'] / (n * 10.0)
for wave in oh['wave'].values:
badregions.append([wave - tol, wave + tol])
return badregions
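def vacuum_to_air_nm(wave_vacuum):
    # Illustrative helper, not used by the script: it repeats the
    # refractive-index correction applied inside add_oh_lines() above,
    # converting an OH line wavelength from vacuum Angstroms to air
    # nanometers. The unit assumption (input in Angstroms) is a sketch.
    n = 1.0 + 2.735182e-4 + 131.4182 / wave_vacuum ** 2 + 2.76249e8 / wave_vacuum ** 4
    return wave_vacuum / (n * 10.0)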
if __name__ == "__main__":
#Parse command line arguments:
fileList = []
interp_regions = []
extensions = True
tellurics = False
trimsize = 100
for arg in sys.argv[1:]:
if "-e" in arg:
extensions = False
        elif "-t" in arg:
            tellurics = True  # telluric lines modeled but not removed
else:
fileList.append(arg)
# Add strong oh lines to interp_regions
oh_file = "{}/School/Research/IGRINS_data/plp/master_calib/ohlines.dat".format(os.environ['HOME'])
interp_regions = add_oh_lines(oh_file, badregions=interp_regions)
GenericSearch.CompanionSearch(fileList,
extensions=extensions,
resolution=45000.0,
trimsize=trimsize,
vsini_values=[1.0, 10.0, 20.0, 30.0, 40.0],
observatory="McDonald",
vbary_correct=True,
debug=False,
badregions=badregions,
interp_regions=interp_regions,
modeldir=modeldir,
addmode="weighted")
| mit |
SanPen/GridCal | src/research/opf/dc_opf_3.py | 1 | 8418 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This program implements the DC power flow as a linear program.
This version uses sparse structures, so the problem compilation is
blazing fast compared to the full-matrix version.
"""
from pulp import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from GridCal.Engine import *
class DcOpf3:
def __init__(self, multi_circuit: MultiCircuit):
"""
OPF simple dispatch problem
:param multi_circuit: GridCal Circuit instance (remember this must be a connected island)
"""
self.multi_circuit = multi_circuit
# circuit compilation
self.numerical_circuit = self.multi_circuit.compile_snapshot()
self.islands = self.numerical_circuit.compute()
self.Sbase = multi_circuit.Sbase
self.B = csc_matrix(self.numerical_circuit.get_B())
self.nbus = self.B.shape[0]
# node sets
self.pqpv = self.islands[0].pqpv
self.pv = self.islands[0].pv
self.vd = self.islands[0].ref
self.pq = self.islands[0].pq
# declare the voltage angles
self.theta = [None] * self.nbus
for i in range(self.nbus):
self.theta[i] = LpVariable("Theta" + str(i), -0.5, 0.5)
# declare the generation
self.PG = list()
def solve(self):
"""
Solve OPF using the sparse formulation
:return:
"""
'''
CSR format explanation:
The standard CSR representation where the column indices for row i are stored in
-> indices[indptr[i]:indptr[i+1]]
and their corresponding values are stored in
-> data[indptr[i]:indptr[i+1]]
If the shape parameter is not supplied, the matrix dimensions are inferred from the index arrays.
https://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.csr_matrix.html
'''
# print('Compiling LP')
prob = LpProblem("DC optimal power flow", LpMinimize)
################################################################################################################
# Add the objective function
################################################################################################################
fobj = 0
# add the voltage angles multiplied by zero (trick)
for j in self.pqpv:
fobj += self.theta[j] * 0.0
# Add the generators cost
for bus in self.multi_circuit.buses:
# check that there are at least one generator at the slack node
if len(bus.controlled_generators) == 0 and bus.type == BusMode.Slack:
raise Warning('There is no generator at the Slack node ' + bus.name + '!!!')
# Add the bus LP vars
for gen in bus.controlled_generators:
# create the generation variable
gen.initialize_lp_vars()
# add the variable to the objective function
fobj += gen.LPVar_P * gen.Cost
self.PG.append(gen.LPVar_P) # add the var reference just to print later...
# Add the objective function to the problem
prob += fobj
################################################################################################################
# Add the matrix multiplication as constraints
# See: https://math.stackexchange.com/questions/1727572/solving-a-feasible-system-of-linear-equations-
# using-linear-programming
################################################################################################################
for i in self.pqpv:
s = 0
d = 0
# add the calculated node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
if j not in self.vd:
s += self.B.data[ii] * self.theta[j]
# add the generation LP vars
for gen in self.multi_circuit.buses[i].controlled_generators:
d += gen.LPVar_P
# add the nodal demand
for load in self.multi_circuit.buses[i].loads:
d -= load.P / self.Sbase
prob.add(s == d, 'ct_node_mismatch_' + str(i))
################################################################################################################
# set the slack nodes voltage angle
################################################################################################################
for i in self.vd:
prob.add(self.theta[i] == 0, 'ct_slack_theta')
################################################################################################################
# set the slack generator power
################################################################################################################
for i in self.vd:
val = 0
g = 0
# compute the slack node power
for ii in range(self.B.indptr[i], self.B.indptr[i+1]):
j = self.B.indices[ii]
val += self.B.data[ii] * self.theta[j]
# Sum the slack generators
for gen in self.multi_circuit.buses[i].controlled_generators:
g += gen.LPVar_P
# the sum of the slack node generators must be equal to the slack node power
prob.add(g == val, 'ct_slack_power_' + str(i))
################################################################################################################
# Set the branch limits
################################################################################################################
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
# branch flow
Fij = self.B[i, j] * (self.theta[i] - self.theta[j])
Fji = self.B[i, j] * (self.theta[j] - self.theta[i])
# constraints
prob.add(Fij <= branch.rate / self.Sbase, 'ct_br_flow_ij_' + str(k))
prob.add(Fji <= branch.rate / self.Sbase, 'ct_br_flow_ji_' + str(k))
################################################################################################################
# Solve
################################################################################################################
print('Solving LP')
prob.solve() # solve with CBC
# prob.solve(CPLEX())
# The status of the solution is printed to the screen
print("Status:", LpStatus[prob.status])
# The optimised objective function value is printed to the screen
print("Cost =", value(prob.objective), '€')
def print(self):
"""
Print results
:return:
"""
print('\nVoltages in p.u.')
for i, th in enumerate(self.theta):
print('Bus', i, '->', 1, '<', th.value(), 'rad')
print('\nGeneration power (in MW)')
for i, g in enumerate(self.PG):
val = g.value() * self.Sbase if g.value() is not None else 'None'
print(g.name, '->', val)
# Set the branch limits
print('\nBranch flows (in MW)')
buses_dict = {bus: i for i, bus in enumerate(self.multi_circuit.buses)}
for k, branch in enumerate(self.multi_circuit.branches):
i = buses_dict[branch.bus_from]
j = buses_dict[branch.bus_to]
if self.theta[i].value() is not None and self.theta[j].value() is not None:
F = self.B[i, j] * (self.theta[i].value() - self.theta[j].value()) * self.Sbase
else:
F = 'None'
print('Branch ' + str(i) + '-' + str(j) + '(', branch.rate, 'MW) ->', F)
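def csr_row_iteration_example():
    """
    Standalone sketch, not used by DcOpf3: illustrates the CSR indexing
    convention described in DcOpf3.solve(). For row i, the column indices
    live in indices[indptr[i]:indptr[i+1]] and the matching values in
    data[indptr[i]:indptr[i+1]]. The matrix below is arbitrary.
    """
    import numpy as np
    from scipy.sparse import csr_matrix
    B = csr_matrix(np.array([[2.0, -1.0, 0.0],
                             [-1.0, 2.0, -1.0],
                             [0.0, -1.0, 2.0]]))
    i = 1
    for ii in range(B.indptr[i], B.indptr[i + 1]):
        j = B.indices[ii]
        print('B[{0}, {1}] = {2}'.format(i, j, B.data[ii]))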
if __name__ == '__main__':
fname = '/home/santi/Documentos/GitHub/GridCal/Grids_and_profiles/grids/Lynn 5 Bus pv.gridcal'
grid = FileOpen(fname).open()
# grid = FileOpen('IEEE30.xlsx').open()
# grid = FileOpen('Illinois200Bus.xlsx').open()
# declare and solve problem
problem = DcOpf3(grid)
problem.solve()
problem.print()
| gpl-3.0 |
jnez71/demos | signals/gaussian_markov_kernel.py | 1 | 1646 | #!/usr/bin/env python3
"""
Kernel of Gaussian-transition scalar Markov process?
"""
import numpy as np
from matplotlib import pyplot
npr = np.random
np.set_printoptions(suppress=True)
pyplot.rcParams["font.size"] = 16
pyplot.rcParams["axes.grid"] = True
################################################## SYSTEM
def initial(m=10.0, s=2.0):
return npr.normal(m, s) # gaussian initial-condition
def transition(x, s=1.0):
#f = 0.5*x # linear
f = 10*np.sin(2/(1+x**2)) # nonlinear
return f + npr.normal(0.0, s) # gaussian transition
def simulate(d):
X = [initial()]
for i in range(d-1):
X.append(transition(X[-1]))
return X # one sample from d-dimensional joint (only gaussian if linear transitions)
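def linear_analytic_covariance(d, a=0.5, s0=2.0, s=1.0):
    # Sketch for comparison, not called by the demo: when the transition is
    # the *linear* branch f(x) = 0.5*x with the gaussian noise used above
    # (initial std s0=2.0, transition std s=1.0), the joint is exactly
    # gaussian and its covariance kernel has a closed form:
    #   Var(x_0) = s0^2
    #   Var(x_{i+1}) = a^2 * Var(x_i) + s^2
    #   Cov(x_i, x_j) = a^|i-j| * Var(x_min(i,j))
    var = np.empty(d)
    var[0] = s0**2
    for i in range(1, d):
        var[i] = a**2 * var[i-1] + s**2
    K = np.empty((d, d))
    for i in range(d):
        for j in range(d):
            K[i, j] = a**abs(i - j) * var[min(i, j)]
    return K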
################################################## SIMULATE
d = 9
n = int(5e5)
print("Simulating samples...")
samples = np.array([simulate(d) for i in range(n)])
print("Computing statistics...")
mean = np.mean(samples, axis=0)
covar = np.cov(samples, rowvar=False)
################################################## VISUALIZE
print("========================================")
print(np.round(mean, 3), '\n')
print(np.round(covar, 3))
print("========================================")
print("Visualizing covariance...")
vmax = np.max(np.abs(covar))
pyplot.imshow(covar, cmap="coolwarm", vmin=-vmax, vmax=vmax, interpolation="lanczos")
pyplot.colorbar()
pyplot.grid(False)
pyplot.title("Covariance")
print("Visualizing joint...")
pyplot.figure()
pyplot.scatter(samples[::int(n/1e3+1), 0], samples[::int(n/1e3+1), -1], alpha=0.4)
pyplot.xlabel("x0")
pyplot.ylabel("x{0}".format(d-1))
pyplot.show()
| mit |
HubLot/PBxplore | pbxplore/tests/test_regression.py | 2 | 21493 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Regression tests for PBxplore.
This test suite runs the various PBxplore programs with various arguments, and
makes sure the output is the expected one. The aim is to check that the
programs are not broken during development.
Be careful: this test suite does not test that the output is right. It just
tests that the output is the expected one based on a previous version.
"""
# Use print as a function like in python 3
from __future__ import print_function
from os import path
from uuid import uuid1
from functools import wraps
import os
import subprocess
import shutil
import sys
import pytest
import MDAnalysis
import matplotlib
try:
import weblogo
IS_WEBLOGO = True
except ImportError:
IS_WEBLOGO = False
here = os.path.abspath(os.path.dirname(__file__))
# Resources for the tests are stored in the following directory
REFDIR = os.path.join(here, "test_data/")
class TemplateTestCase(object):
"""
Template TestCase class for the other TestCase class to inherit from.
Children class must overload the `_build_command_line` and the
`_validate_output` methods.
"""
def _run_program_and_validate(self, out_run_dir, reference, **kwargs):
"""
Run the program to test and validate its outputs.
"""
# Build the command line to run. This relies on the _build_command_line
# method that is a virtual method, which must be overloaded by the
# child class.
command = self._build_command_line(str(out_run_dir), **kwargs)
print(command)
# Run the command.
exe = subprocess.Popen(command,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# The return code should be 0.
assert exe.returncode == 0, 'Program exited with a {} code.'.format(
exe.returncode)
# Validate the output files. This relies on the _validate_output
# virtual method.
self._validate_output(str(out_run_dir), reference, **kwargs)
def _build_command_line(self, **kwargs):
"""
Build the command line to run.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
def _validate_output(self, reference, **kwargs):
"""
Validate the output files.
This is a virtual method. It must be overloaded by the child class.
"""
raise NotImplementedError
class TestPBAssign(TemplateTestCase):
"""
Regression tests for PBAssign.py
"""
references = ["1BTA", "1AY7", "2LFU", "3ICH"]
extensions = [".pdb", ".cif.gz"]
def _run_PBassign(self, out_run_dir, pdbid, extension,
multiple=None, indir=REFDIR):
"""
        Run PBassign on the given PDB/mmCIF input(s) and collect the output.
        When `multiple` is provided, every entry of `pdbid` is passed to a
        single PBassign call and the outputs use `multiple` as basename.
"""
if multiple is None:
test_input = path.join(REFDIR, pdbid + extension)
out_basename = path.join(out_run_dir, pdbid)
input_args = ['-p', test_input]
else:
input_args = []
for basename in pdbid:
input_args += ['-p', path.join(REFDIR, basename + extension)]
out_basename = path.join(out_run_dir, multiple)
run_list = (['PBassign'] + input_args + ['-o', out_basename + extension])
exe = subprocess.Popen(run_list,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = exe.communicate()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
return exe.returncode, out_run_dir
def _test_PBassign_options(self, out_run_dir, basename, extension, outfiles,
multiple=None, expected_exit=0):
out_run_dir = str(out_run_dir)
if multiple is not None:
out_name = multiple
status, out_run_dir = self._run_PBassign(out_run_dir, basename, extension, multiple)
assert status == expected_exit, \
            'PBassign stopped with a {0} exit code'.format(status)
assert len(os.listdir(out_run_dir)) == len(outfiles),\
            ('PBassign did not produce the right number of files: '
'{0} files produced instead of {1}').format(
len(os.listdir(out_run_dir)), len(outfiles))
out_name = basename if multiple is None else multiple
for outfile in (template.format(out_name + extension)
for template in outfiles):
test_file = path.join(out_run_dir, outfile)
ref_file = path.join(REFDIR, outfile)
_assert_identical_files(test_file, ref_file)
@pytest.mark.parametrize('reference', references)
@pytest.mark.parametrize('extension', extensions)
def test_fasta(self, tmpdir, reference, extension):
"""
Run PBAssign on PDB files, and check the fasta output.
"""
self._test_PBassign_options(tmpdir, reference, extension,
['{0}.PB.fasta'])
@pytest.mark.parametrize('extension', extensions)
def test_multiple_inputs(self, tmpdir, extension):
"""
Run PBassign with multiple inputs.
"""
self._test_PBassign_options(tmpdir, self.references, extension,
['{0}.PB.fasta'], multiple='all')
def test_xtc_input(self, tmpdir):
"""
Run PBassign on a trajectory in the XTC format.
        This test should produce the right output with python 2. With python 3,
PBassign should fail as MDanalysis is not available.
"""
name = 'barstar_md_traj'
out_run_dir = str(tmpdir)
output_fname = name + '.PB.fasta'
call_list = ['PBassign',
'-x', os.path.join(REFDIR, name + '.xtc'),
'-g', os.path.join(REFDIR, name + '.gro'),
'-o', os.path.join(out_run_dir, name)]
exe = subprocess.Popen(call_list,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = exe.communicate()
status = exe.wait()
print(out.decode('utf-8'))
print(err.decode('utf-8'))
# MDanalysis is available, PBassign should run and produce the
# correct output
assert status == 0, 'PBassign exited with an error'
_assert_identical_files(os.path.join(REFDIR, output_fname),
os.path.join(out_run_dir, output_fname))
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_different_outputs(self, tmpdir):
"""
Test if the tests properly fail if an output content is different from
expected.
"""
reference = "test_fail"
extension = ".pdb"
self._test_PBassign_options(tmpdir, reference, extension, ['{0}.PB.fasta'])
class TestPBcount(TemplateTestCase):
"""
Test running PBcount.
"""
def _build_command_line(self, out_run_dir, input_files, output, first_residue=None):
output_full_path = os.path.join(out_run_dir, output)
command = ['PBcount', '-o', output_full_path]
for input_file in input_files:
command += ['-f', os.path.join(REFDIR, input_file)]
if first_residue is not None:
command += ['--first-residue', str(first_residue)]
return command
def _validate_output(self, out_run_dir, reference, output, **kwargs):
reference_full_path = os.path.join(REFDIR, reference)
output_full_path = os.path.join(out_run_dir,
output + '.PB.count')
_assert_identical_files(output_full_path, reference_full_path)
def test_single_file_single_model(self, tmpdir):
"""
Run PBcount with a single input file that contains a single model.
"""
input_files = ['count_single1.PB.fasta', ]
output = 'output'
reference = 'count_single1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_single_file_multiple_models(self, tmpdir):
"""
Run PBcount with a single input file that contains multiple models.
"""
input_files = ['count_multi1.PB.fasta', ]
output = 'output'
reference = 'count_multi1.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_single_model(self, tmpdir):
"""
Run PBcount with multiple input files that contain a single model.
"""
input_files = ['count_single1.PB.fasta',
'count_single2.PB.fasta',
'count_single3.PB.fasta']
output = 'output'
reference = 'count_single123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_multiple_files_multiple_models(self, tmpdir):
"""
Run PBcount with multiple input files that contain multiple models each.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output)
def test_first_residue_positive(self, tmpdir):
"""
Test PBcount on with the --first-residue option and a positive value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=20)
def test_first_residue_negative(self, tmpdir):
"""
Test PBcount on with the --first-residue option and a negative value.
"""
input_files = ['count_multi1.PB.fasta',
'count_multi2.PB.fasta',
'count_multi3.PB.fasta']
output = 'output'
reference = 'count_multi123_first-20.PB.count'
self._run_program_and_validate(tmpdir, reference,
input_files=input_files, output=output,
first_residue=-20)
class TestPBstat(TemplateTestCase):
def _build_command_line(self, out_run_dir, input_file, output,
mapdist=False, neq=False,
logo=False, image_format=None,
residue_min=None, residue_max=None):
input_full_path = os.path.join(REFDIR, input_file)
output_full_path = os.path.join(str(out_run_dir), output)
command = ['PBstat', '-f', input_full_path, '-o', output_full_path]
if mapdist:
command += ['--map']
if neq:
command += ['--neq']
if logo:
command += ['--logo']
if image_format is not None:
command += ['--image-format', image_format]
if residue_min is not None:
command += ['--residue-min', str(residue_min)]
if residue_max is not None:
command += ['--residue-max', str(residue_max)]
return command
def _validate_output(self, out_run_dir, reference, input_file, output,
mapdist=False, neq=False, logo=False, image_format=None,
residue_min=None, residue_max=None, **kwargs):
suffix_residue = ''
if residue_min or residue_max:
suffix_residue = ".{}-{}".format(residue_min, residue_max)
suffix_args = ''
extension = '.png'
if neq:
suffix_args = '.Neq'
if mapdist:
suffix_args = '.map'
if logo:
suffix_args = '.logo'
if image_format is None:
extension = '.png'
else:
extension = '.' + image_format
reference_full_path = os.path.join(REFDIR, reference + '.PB'
+ suffix_args + suffix_residue)
output = os.path.join(str(out_run_dir), output)
output_full_path = output + '.PB' + suffix_args + suffix_residue
if neq:
# Assess the validity of the Neq file
_assert_identical_files(output_full_path, reference_full_path)
# Assess the creation of the graph file (png or pdf)
value, msg = _file_validity(output_full_path + extension)
assert value, msg
def test_neq(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True)
self._run_program_and_validate(tmpdir,
reference='count_single123',
input_file='count_single123.PB.count',
output='output',
neq=True)
def test_neq_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True,
residue_min=10, residue_max=30)
def test_neq_with_first_residue(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True)
def test_neq_with_first_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123_first20',
input_file='count_multi123_first20.PB.count',
output='output',
neq=True,
residue_min=25, residue_max=35)
def test_neq_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
neq=True, image_format='pdf')
def test_mapdist(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True)
def test_mapdist_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True, image_format='pdf')
def test_mapdist_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
mapdist=True,
residue_min=10, residue_max=30)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True)
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_pdf(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='pdf')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_png(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='png')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_logo_jpg(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='jpg')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
@pytest.mark.xfail(strict=True, raises=AssertionError)
def test_weblogo_logo_invalid_format(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True, image_format='invalid')
@pytest.mark.skipif(not IS_WEBLOGO, reason="Weblogo is not present")
def test_weblogo_with_range_residues(self, tmpdir):
self._run_program_and_validate(tmpdir,
reference='count_multi123',
input_file='count_multi123.PB.count',
output='output',
logo=True,
residue_min=10, residue_max=30)
def _file_validity(file_a):
"""
    Check whether file_a exists and is not empty.
Return a tuple containing:
- True if all went well, False otherwise
- the error message, empty if True is returned
"""
if os.path.isfile(file_a):
if os.path.getsize(file_a) > 0:
return True, ''
else:
return False, '{0} is empty'.format(file_a)
else:
return False, '{0} does not exist'.format(file_a)
def _same_file_content(file_a, file_b, comment_char=">"):
"""
    Return True if two files are identical. Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
with open(file_a) as f1, open(file_b) as f2:
# Compare content line by line
for f1_line, f2_line in zip(f1, f2):
if (f1_line != f2_line):
# If both lines start with a comment,
# it's a valid one no matter the content of the comment
f1_firstchar = f1_line.strip().startswith(comment_char)
f2_firstchar = f2_line.strip().startswith(comment_char)
if f1_firstchar != f2_firstchar:
print(file_a, file_b)
print(f1_line, f2_line, sep='//')
return False
        # Check if one file is longer than the other; it would show up as
        # one file iterator not being completely consumed
for infile in (f1, f2):
try:
next(infile)
except StopIteration:
pass
else:
# The iterator is not consumed, it means that this file is
# longer than the other
print('File too long')
return False
# If we reach this line, it means that we did not find any difference
return True
def _assert_identical_files(file_a, file_b, comment_char=">"):
"""
    Raise an AssertionError if the two files are not identical.
    Take file paths as arguments.
Ignore the content of lines which start with `comment_char`.
"""
assert _same_file_content(file_a, file_b), '{0} and {1} are not identical'\
.format(file_a, file_b)
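def _example_same_file_content():
    # Illustrative sketch, not part of the original test suite: shows the
    # comparison rule implemented by _same_file_content() above, i.e. lines
    # starting with the comment character '>' only need to both be comments;
    # their text is not compared.
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    file_a = os.path.join(tmp_dir, 'a.PB.fasta')
    file_b = os.path.join(tmp_dir, 'b.PB.fasta')
    with open(file_a, 'w') as handle:
        handle.write('> header A\nmmmmmmdd\n')
    with open(file_b, 'w') as handle:
        handle.write('> header B\nmmmmmmdd\n')
    assert _same_file_content(file_a, file_b)
    shutil.rmtree(tmp_dir)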
| mit |
JPFrancoia/scikit-learn | sklearn/preprocessing/data.py | 13 | 70436 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import row_norms
from ..utils.extmath import _incremental_mean_and_var
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.fixes import bincount
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
    StandardScaler: Performs scaling to unit variance using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
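def _scale_example():
    """Minimal illustrative sketch, not part of the scikit-learn API:
    column-wise standardization with :func:`scale` on an arbitrary array.
    After scaling, each column has (approximately) zero mean and unit
    variance."""
    X = np.array([[1., -1., 2.],
                  [2., 0., 0.],
                  [0., 1., -1.]])
    X_scaled = scale(X)
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)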
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
See also
--------
minmax_scale: Equivalent function without the object oriented API.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
            raise TypeError("MinMaxScaler does not support sparse input. "
                            "You may consider using MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
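def _minmax_scaler_example():
    """Minimal illustrative sketch, not part of the scikit-learn API: the
    transformation documented above maps each column onto ``feature_range``.
    For the first column below, min=-1 and max=1, so -0.5 is mapped to
    (-0.5 - (-1)) / (1 - (-1)) = 0.25."""
    X = np.array([[-1., 2.],
                  [-0.5, 6.],
                  [0., 10.],
                  [1., 18.]])
    return MinMaxScaler(feature_range=(0, 1)).fit_transform(X)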
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface
to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range : tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MinMaxScaler: Performs scaling to a given range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
# To allow retro-compatibility, we handle here the case of 1D-input
# From 0.17, 1D-input are deprecated in scaler objects
# Although, we want to allow the users to keep calling this function
# with 1D-input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with 'whiten=True'.
""" # noqa
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. "
"Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
# Even in the case of `with_mean=False`, we update the mean anyway
# This is needed for the incremental computation of the var
# See incr_mean_variance_axis and _incremental_mean_variance_axis
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
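# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): minimal out-of-core usage of the class defined above. The helper
# name below is hypothetical.
def _example_standard_scaler_partial_fit():
    import numpy as np
    X_stream = np.arange(30, dtype=np.float64).reshape(10, 3)
    scaler = StandardScaler()
    # Feed the data in chunks, as one would when it does not fit in memory.
    for chunk in np.array_split(X_stream, 5):
        scaler.partial_fit(chunk)
    # After all chunks are seen, mean_ and scale_ match a single full fit.
    return scaler.transform(X_stream)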
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
maxabs_scale: Equivalent function without the object oriented API.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
when `fit` is not feasible due to very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
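# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): MaxAbsScaler on a sparse matrix. The helper name is hypothetical.
def _example_maxabs_scaler_sparse():
    import numpy as np
    from scipy import sparse as sp
    X = sp.csr_matrix(np.array([[1., -2.], [0., 4.]]))
    scaler = MaxAbsScaler().fit(X)
    # Each column is divided by its maximum absolute value (here 1. and 4.),
    # so all entries end up in [-1, 1] and the sparsity pattern is preserved.
    return scaler.transform(X)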
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
See also
--------
    MaxAbsScaler: Performs scaling to the [-1, 1] range using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
""" # noqa
    # To allow backward compatibility, we handle here the case of 1D input.
    # From 0.17, 1D input is deprecated in scaler objects, but we still want
    # to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the quantile range (defaults to IQR: Interquartile Range).
The IQR is the range between the 1st quartile (25th quantile)
and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
robust_scale: Equivalent function without the object oriented API.
:class:`sklearn.decomposition.PCA`
Further removes the linear correlation across features with
'whiten=True'.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
https://en.wikipedia.org/wiki/Median_(statistics)
https://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.quantile_range = quantile_range
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q_min, q_max = self.quantile_range
if not 0 <= q_min <= q_max <= 100:
raise ValueError("Invalid quantile range: %s" %
str(self.quantile_range))
q = np.percentile(X, self.quantile_range, axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
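# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): RobustScaler in the presence of an outlier. The helper name is
# hypothetical.
def _example_robust_scaler_outlier():
    import numpy as np
    X = np.array([[1.], [2.], [3.], [4.], [100.]])   # 100. is an outlier
    scaler = RobustScaler().fit(X)
    # center_ is the median (3.0) and scale_ the interquartile range (2.0),
    # so the outlier barely influences the resulting transformation.
    return scaler.transform(X)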
def robust_scale(X, axis=0, with_centering=True, with_scaling=True,
quantile_range=(25.0, 75.0), copy=True):
"""Standardize a dataset along any axis
    Center to the median and component-wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
quantile_range : tuple (q_min, q_max), 0.0 < q_min < q_max < 100.0
Default: (25.0, 75.0) = (1st quantile, 3rd quantile) = IQR
Quantile range used to calculate ``scale_``.
.. versionadded:: 0.18
copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
RobustScaler: Performs centering and scaling using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
quantile_range=quantile_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_output_features, n_input_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<sphx_glr_auto_examples_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(bincount(c, minlength=self.n_input_features_)
for c in combinations)
def get_feature_names(self, input_features=None):
"""
Return feature names for output features
Parameters
----------
input_features : list of string, length n_features, optional
String names for input features if available. By default,
"x0", "x1", ... "xn_features" is used.
Returns
-------
output_feature_names : list of string, length n_output_features
"""
powers = self.powers_
if input_features is None:
input_features = ['x%d' % i for i in range(powers.shape[1])]
feature_names = []
for row in powers:
inds = np.where(row)[0]
if len(inds):
name = " ".join("%s^%d" % (input_features[ind], exp)
if exp != 1 else input_features[ind]
for ind, exp in zip(inds, row[inds]))
else:
name = "1"
feature_names.append(name)
return feature_names
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
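# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): naming the generated polynomial features. The helper name is
# hypothetical.
def _example_polynomial_feature_names():
    import numpy as np
    X = np.arange(6).reshape(3, 2)
    poly = PolynomialFeatures(degree=2).fit(X)
    # For two inputs 'a' and 'b' the degree-2 expansion is
    # ['1', 'a', 'b', 'a^2', 'a b', 'b^2'].
    return poly.get_feature_names(['a', 'b'])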
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
Normalizer: Performs normalization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
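# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): row-wise normalization with the functional API. The helper name is
# hypothetical.
def _example_normalize_rows():
    import numpy as np
    X = np.array([[3., 4.], [1., 0.]])
    X_l2 = normalize(X, norm='l2')                       # -> [[0.6, 0.8], [1., 0.]]
    X_l1, norms = normalize(X, norm='l1', return_norm=True)
    return X_l2, X_l1, norms                             # norms == [7., 1.]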
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
    classification or clustering, for instance. For example, the dot
    product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
normalize: Equivalent function without the object oriented API.
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
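# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): the estimator counterpart of ``normalize``. The helper name is
# hypothetical.
def _example_normalizer_l1():
    import numpy as np
    normalizer = Normalizer(norm='l1')
    # fit() is a no-op (the transformer is stateless), so the estimator can be
    # placed in a Pipeline without learning anything from the training data.
    return normalizer.fit_transform(np.array([[4., 1., -5.]]))  # abs values of each row sum to 1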
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
Binarizer: Performs binarization using the ``Transformer`` API
(e.g. as part of a preprocessing :class:`sklearn.pipeline.Pipeline`).
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
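# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): thresholding with the functional API. The helper name is
# hypothetical.
def _example_binarize():
    import numpy as np
    X = np.array([[1., -1., 2.], [0., 0.5, -0.3]])
    # Values strictly above the threshold map to 1, the rest to 0:
    # -> [[1., 0., 1.], [0., 1., 0.]]
    return binarize(X, threshold=0.0)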
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
binarize: Equivalent function without the object oriented API.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
    function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
@property
def _pairwise(self):
return True
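# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): for a linear kernel, centering the kernel matrix is equivalent to
# centering the columns of X first. The helper name is hypothetical.
def _example_kernel_centerer_linear():
    import numpy as np
    X = np.random.RandomState(0).rand(5, 3)
    K = np.dot(X, X.T)
    K_centered = KernelCenterer().fit_transform(K)
    Xc = X - X.mean(axis=0)
    # np.allclose(K_centered, np.dot(Xc, Xc.T)) -> True
    return K_centered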
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Note: a one-hot encoding of y labels should use a LabelBinarizer
instead.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be
in ``range(n_values[i])``
categorical_features : "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
sklearn.preprocessing.LabelBinarizer : binarizes labels in a one-vs-all
fashion.
sklearn.preprocessing.MultiLabelBinarizer : transforms between iterable of
iterables and a multilabel format, e.g. a (samples x classes) binary
matrix indicating the presence of a class label.
sklearn.preprocessing.LabelEncoder : encodes labels with values between 0
and n_classes-1.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
# We use only those categorical features of X that are known using fit.
        # i.e. less than n_values_, using a mask.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if (isinstance(self.n_values, six.string_types) and
self.n_values == 'auto'):
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
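# --- Editor's illustrative sketch (not part of the original scikit-learn
# module): ignoring categories unseen during fit. The helper name is
# hypothetical.
def _example_one_hot_unknown():
    import numpy as np
    enc = OneHotEncoder(handle_unknown='ignore', sparse=False)
    enc.fit(np.array([[0, 1], [1, 2]]))
    # The category 3 in the second column was not seen during fit, so its
    # one-hot columns stay all zero instead of raising an error:
    # -> [[1., 0., 0., 0.]]
    return enc.transform(np.array([[0, 3]]))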
| bsd-3-clause |
jm-begon/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 242 | 5885 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters, and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub-clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence of similar sizes, as can also be
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
    # The silhouette coefficient can range from -1 to 1, but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
    # The vertical line for the average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
carpyncho/feets | doc/source/JSAnimation/examples.py | 4 | 3126 | import numpy as np
from matplotlib import pyplot as plt
from matplotlib import animation
from JSAnimation import IPython_display
def basic_animation(frames=100, interval=30):
"""Plot a basic sine wave with oscillating amplitude"""
fig = plt.figure()
ax = plt.axes(xlim=(0, 10), ylim=(-2, 2))
line, = ax.plot([], [], lw=2)
x = np.linspace(0, 10, 1000)
def init():
line.set_data([], [])
return line,
def animate(i):
y = np.cos(i * 0.02 * np.pi) * np.sin(x - i * 0.02 * np.pi)
line.set_data(x, y)
return line,
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval)
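# --- Editor's note (illustrative, not part of the original example file):
# because ``IPython_display`` is imported above, evaluating the returned
# animation object in a notebook cell is assumed to render it inline, e.g.
#
#     anim = basic_animation(frames=50)
#     anim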
def lorenz_animation(N_trajectories=20, rseed=1, frames=200, interval=30):
"""Plot a 3D visualization of the dynamics of the Lorenz system"""
from scipy import integrate
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.colors import cnames
def lorentz_deriv(coords, t0, sigma=10., beta=8./3, rho=28.0):
"""Compute the time-derivative of a Lorentz system."""
x, y, z = coords
return [sigma * (y - x), x * (rho - z) - y, x * y - beta * z]
# Choose random starting points, uniformly distributed from -15 to 15
np.random.seed(rseed)
x0 = -15 + 30 * np.random.random((N_trajectories, 3))
# Solve for the trajectories
t = np.linspace(0, 2, 500)
x_t = np.asarray([integrate.odeint(lorentz_deriv, x0i, t)
for x0i in x0])
# Set up figure & 3D axis for animation
fig = plt.figure()
ax = fig.add_axes([0, 0, 1, 1], projection='3d')
ax.axis('off')
# choose a different color for each trajectory
colors = plt.cm.jet(np.linspace(0, 1, N_trajectories))
# set up lines and points
lines = sum([ax.plot([], [], [], '-', c=c)
for c in colors], [])
pts = sum([ax.plot([], [], [], 'o', c=c, ms=4)
for c in colors], [])
# prepare the axes limits
ax.set_xlim((-25, 25))
ax.set_ylim((-35, 35))
ax.set_zlim((5, 55))
# set point-of-view: specified by (altitude degrees, azimuth degrees)
ax.view_init(30, 0)
# initialization function: plot the background of each frame
def init():
for line, pt in zip(lines, pts):
line.set_data([], [])
line.set_3d_properties([])
pt.set_data([], [])
pt.set_3d_properties([])
return lines + pts
# animation function: called sequentially
def animate(i):
# we'll step two time-steps per frame. This leads to nice results.
i = (2 * i) % x_t.shape[1]
for line, pt, xi in zip(lines, pts, x_t):
x, y, z = xi[:i + 1].T
line.set_data(x, y)
line.set_3d_properties(z)
pt.set_data(x[-1:], y[-1:])
pt.set_3d_properties(z[-1:])
ax.view_init(30, 0.3 * i)
fig.canvas.draw()
return lines + pts
return animation.FuncAnimation(fig, animate, init_func=init,
frames=frames, interval=interval)
| mit |
mizzao/ggplot | ggplot/stats/stat_function.py | 12 | 4439 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_function(stat):
"""
Superimpose a function onto a plot
    The function ``fun`` is evaluated at ``n`` evenly spaced points spanning
    the range of the ``x`` aesthetic, and the result is drawn as a path.
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
        List or dict of additional arguments to pass to the function. If neither
        a list nor a dict, the object is passed as the second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
# TODO: Should not have a required aesthetic, use the scale information
# maybe that is where the "scale trainning" helps
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'path', 'position': 'identity', 'fun': None,
'n': 101, 'args': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
fun = self.params['fun']
n = self.params['n']
args = self.params['args']
if not hasattr(fun, '__call__'):
raise GgplotError("stat_function requires parameter 'fun' to be " +
"a function or any other callable object")
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x, *args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x, **args)
elif args is not None:
fun = lambda x: old_fun(x, args)
else:
fun = lambda x: old_fun(x)
x = np.linspace(x.min(), x.max(),n)
y = list(map(fun, x))
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
# Don't copy the any previous 'y' assignments
try:
del data['y']
except KeyError:
pass
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
| bsd-2-clause |
yyjiang/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shrinkage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shrinkage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# a lone 1-D array is treated as a single 5-feature observation, so fit should warn and return an all-zero covariance
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
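# Editor's note: small usage sketch (not from the original file) showing how the
# three Ledoit-Wolf entry points exercised above relate: the LedoitWolf
# estimator, the ledoit_wolf() function and ledoit_wolf_shrinkage() agree on the
# same shrinkage coefficient and covariance for a given data matrix.
def _sketch_ledoit_wolf_consistency():
    import numpy as np
    from sklearn.covariance import (LedoitWolf, ledoit_wolf,
                                    ledoit_wolf_shrinkage)
    rng = np.random.RandomState(42)
    X_demo = rng.normal(size=(30, 4))
    est = LedoitWolf().fit(X_demo)
    cov_func, shrinkage_func = ledoit_wolf(X_demo)
    assert np.allclose(est.covariance_, cov_func)
    assert np.isclose(est.shrinkage_, shrinkage_func)
    assert np.isclose(est.shrinkage_, ledoit_wolf_shrinkage(X_demo))
    assert 0.0 <= est.shrinkage_ <= 1.0          # shrinkage is a convex weight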
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shrinkage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shrinkage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# a lone 1-D array is treated as a single 5-feature observation, so fit should warn and return an all-zero covariance
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
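# Editor's note: usage sketch (not part of the original tests) mirroring the OAS
# checks above: the OAS estimator and the oas() helper return the same shrunk
# covariance and shrinkage coefficient for the same input.
def _sketch_oas_consistency():
    import numpy as np
    from sklearn.covariance import OAS, oas
    rng = np.random.RandomState(7)
    X_demo = rng.normal(size=(25, 3))
    est = OAS().fit(X_demo)
    cov_func, shrinkage_func = oas(X_demo)
    assert np.allclose(est.covariance_, cov_func)
    assert np.isclose(est.shrinkage_, shrinkage_func)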
| bsd-3-clause |
pyoceans/pocean-core | pocean/dsg/trajectory/cr.py | 1 | 10376 | #!python
# coding=utf-8
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
from pocean.utils import (
create_ncvar_from_series,
dict_update,
downcast_dataframe,
generic_masked,
get_default_axes,
get_dtype,
get_mapped_axes_variables,
get_masked_datetime_array,
get_ncdata_from_series,
nativize_times,
normalize_countable_array,
)
from pocean.cf import CFDataset, cf_safe_name
from pocean.dsg.trajectory import trajectory_calculated_metadata
from pocean import logger as L # noqa
class ContiguousRaggedTrajectory(CFDataset):
@classmethod
def is_mine(cls, dsg, strict=False):
try:
rvars = dsg.filter_by_attrs(cf_role='trajectory_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'trajectory'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
assert len(dsg.z_axes()) >= 1
o_index_vars = dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 1
assert o_index_vars[0].sample_dimension in dsg.dimensions # Sample dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except BaseException:
if strict is True:
raise
return False
return True
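# Editor's note: illustrative CDL-style summary (not from the original source)
# of the layout is_mine() accepts, i.e. a CF "contiguous ragged array"
# trajectory file:
#
#   dimensions:
#       trajectory = 2 ;
#       obs = 7 ;                       // the sample dimension
#   variables:
#       int trajectory(trajectory) ;
#           trajectory:cf_role = "trajectory_id" ;
#       int rowSize(trajectory) ;
#           rowSize:sample_dimension = "obs" ;   // what is_mine() checks for
#       double time(obs) ; double lon(obs) ; double lat(obs) ; double z(obs) ;
#   // global attribute: :featureType = "trajectory" ;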
@classmethod
def from_dataframe(cls, df, output, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
daxes = axes
# Should never be a CR file with one trajectory so we ignore the "reduce_dims" attribute
_ = kwargs.pop('reduce_dims', False) # noqa
unlimited = kwargs.pop('unlimited', False)
unique_dims = kwargs.pop('unique_dims', False)
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
# which is not supported in xarray
changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)
# Downcast anything from int64 to int32
# Convert any timezone aware datetimes to native UTC times
df = downcast_dataframe(nativize_times(df))
with ContiguousRaggedTrajectory(output, 'w') as nc:
trajectory_groups = df.groupby(axes.trajectory)
unique_trajectories = list(trajectory_groups.groups.keys())
num_trajectories = len(unique_trajectories)
nc.createDimension(daxes.trajectory, num_trajectories)
trajectory = nc.createVariable(axes.trajectory, get_dtype(df[axes.trajectory]), (daxes.trajectory,))
# Size the sample (observation) dimension: unlimited if requested, otherwise the total number of rows
if unlimited is True:
nc.createDimension(daxes.sample, None)
else:
nc.createDimension(daxes.sample, len(df))
# Number of observations in each trajectory
row_size = nc.createVariable('rowSize', 'i4', (daxes.trajectory,))
attributes = dict_update(nc.nc_attributes(axes, daxes), kwargs.pop('attributes', {}))
# Variables defined on only the trajectory axis
traj_vars = kwargs.pop('traj_vars', [])
traj_columns = [ p for p in traj_vars if p in df.columns ]
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
create_ncvar_from_series(
nc,
var_name,
(daxes.trajectory,),
df[c],
zlib=True,
complevel=1
)
for i, (trajid, trg) in enumerate(trajectory_groups):
trajectory[i] = trajid
row_size[i] = len(trg)
# Save any trajectory variables using the first value found
# in the column.
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
continue
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(trg[c], v)[0]
try:
v[i] = vvalues
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Add all of the columns based on the sample dimension. Take all columns and remove the
# trajectory, rowSize and other trajectory based columns.
sample_columns = [
f for f in df.columns if f not in traj_columns + ['rowSize', axes.trajectory]
]
for c in sample_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
v = create_ncvar_from_series(
nc,
var_name,
(daxes.sample,),
df[c],
zlib=True,
complevel=1
)
else:
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(df[c], v)
try:
if unlimited is True:
v[:] = vvalues
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Metadata variables
if 'crs' not in nc.variables:
nc.createVariable('crs', 'i4')
# Set attributes
nc.update_attributes(attributes)
return ContiguousRaggedTrajectory(output, **kwargs)
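# Editor's note: minimal usage sketch (not part of the original module and not
# executed here). from_dataframe() expects one row per observation plus a
# trajectory-id column and the axis columns named via ``axes``; the column
# names and file path below are illustrative assumptions:
#
#   import pandas as pd
#   df = pd.DataFrame({
#       'trajectory': ['a', 'a', 'b'],
#       't': pd.to_datetime(['2020-01-01', '2020-01-02', '2020-01-01']),
#       'x': [-70.1, -70.2, -69.9],   # longitude
#       'y': [42.1, 42.2, 41.8],      # latitude
#       'z': [0.0, 1.0, 0.0],         # depth
#       'temperature': [10.1, 10.4, 9.7],
#   })
#   ContiguousRaggedTrajectory.from_dataframe(
#       df, 'sketch_cr.nc',
#       axes=dict(trajectory='trajectory', t='t', x='x', y='y', z='z', sample='obs'))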
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
if df is None:
df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows, axes=axes)
return trajectory_calculated_metadata(df, axes, geometries)
def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
axv = get_mapped_axes_variables(self, axes)
o_index_var = self.filter_by_attrs(sample_dimension=lambda x: x is not None)
if not o_index_var:
raise ValueError(
'Could not find the "sample_dimension" attribute on any variables, '
'is this a valid {}?'.format(self.__class__.__name__)
)
else:
o_index_var = o_index_var[0]
o_dim = self.dimensions[o_index_var.sample_dimension] # Sample dimension
t_dim = o_index_var.dimensions
# Trajectory
row_sizes = o_index_var[:]
traj_data = normalize_countable_array(axv.trajectory)
traj_data = np.repeat(traj_data, row_sizes)
# time
time_data = get_masked_datetime_array(axv.t[:], axv.t).flatten()
df_data = OrderedDict([
(axes.t, time_data),
(axes.trajectory, traj_data)
])
building_index_to_drop = np.ones(o_dim.size, dtype=bool)
extract_vars = copy(self.variables)
# Skip the time and row index variables
del extract_vars[o_index_var.name]
del extract_vars[axes.t]
for i, (dnam, dvar) in enumerate(extract_vars.items()):
# Trajectory dimensions
if dvar.dimensions == t_dim:
vdata = np.repeat(generic_masked(dvar[:], attrs=self.vatts(dnam)), row_sizes)
# Sample dimensions
elif dvar.dimensions == (o_dim.name,):
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
else:
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
continue
# Mark rows with data so we don't remove them with clean_rows
if vdata.size == building_index_to_drop.size:
building_index_to_drop = (building_index_to_drop == True) & (vdata.mask == True) # noqa
# Handle scalars here at the end
if vdata.size == 1:
vdata = vdata[0]
df_data[dnam] = vdata
df = pd.DataFrame(df_data)
# Drop all data columns with no data
if clean_cols:
df = df.dropna(axis=1, how='all')
# Drop all data rows with no data variable data
if clean_rows:
df = df.iloc[~building_index_to_drop]
return df
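# Editor's note: tiny standalone illustration (not from the original source) of
# the ragged expansion used in to_dataframe() above: per-trajectory values are
# repeated rowSize times so they line up with the flat sample axis, e.g.
#   np.repeat(np.array(['a', 'b']), np.array([3, 2]))
#   -> array(['a', 'a', 'a', 'b', 'b'], dtype='<U1')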
def nc_attributes(self, axes, daxes):
atts = super(ContiguousRaggedTrajectory, self).nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'trajectory',
'cdm_data_type': 'Trajectory'
},
axes.trajectory: {
'cf_role': 'trajectory_id',
'long_name' : 'trajectory identifier',
'ioos_category': 'identifier'
},
axes.x: {
'axis': 'X'
},
axes.y: {
'axis': 'Y'
},
axes.z: {
'axis': 'Z'
},
axes.t: {
'units': self.default_time_unit,
'standard_name': 'time',
'axis': 'T'
},
'rowSize': {
'sample_dimension': daxes.sample
}
})
| mit |
nsalomonis/AltAnalyze | AltAnalyzeViewer.py | 1 | 282646 | import os.path, sys, shutil
import os
import string, re
import subprocess
import numpy as np
import unique
import traceback
import wx
import wx.lib.scrolledpanel
import wx.grid as gridlib
try:
import warnings
with warnings.catch_warnings():
warnings.filterwarnings("ignore",category=UserWarning) ### hides import warnings
import matplotlib
#try: matplotlib.use('TkAgg')
#except Exception: pass
#import matplotlib.pyplot as plt ### Backend conflict issue when called prior to the actual Wx window appearing
#matplotlib.use('WXAgg')
from matplotlib.backends.backend_wx import NavigationToolbar2Wx
from matplotlib.figure import Figure
from numpy import arange, sin, pi
except Exception: pass
if os.name == 'nt': bheight=20
else: bheight=10
rootDirectory = unique.filepath(str(os.getcwd()))
currentDirectory = unique.filepath(str(os.getcwd())) + "/" + "Config/" ### NS-91615 alternative to __file__
currentDirectory = string.replace(currentDirectory,'AltAnalyzeViewer.app/Contents/Resources','')
os.chdir(currentDirectory)
parentDirectory = str(os.getcwd()) ### NS-91615 gives the parent AltAnalyze directory
sys.path.insert(1,parentDirectory) ### NS-91615 adds the AltAnalyze modules to the system path so that modules such as visualization_scripts.clustering can be imported
import UI
#These classes set up the "tab" feature in the program, allowing you to switch the viewer to different modes.
class PageTwo(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour("white")
myGrid = ""
class PageThree(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.SetBackgroundColour("white")
class PageFour(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
class PageFive(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
class Main(wx.Frame):
def __init__(self,parent,id):
wx.Frame.__init__(self, parent, id,'AltAnalyze Results Viewer', size=(900,610))
self.Show()
self.Maximize(True) #### This allows the frame to resize to the host machine's max size
self.heatmap_translation = {}
self.heatmap_run = {}
self.species = 'Hs'
self.platform = 'RNASeq'
self.geneset_type = 'WikiPathways'
self.supported_genesets = []
self.runPCA = False
self.SetBackgroundColour((230, 230, 230))
self.species=''
#PANELS & WIDGETS
#self.panel is one of the TOP PANELS. These are used for title display, the open project button, and sort & filter buttons.
self.panel = wx.Panel(self, id=2, pos=(200,0), size=(600,45), style=wx.RAISED_BORDER)
self.panel.SetBackgroundColour((110, 150, 250))
#Panel 2 is the main view panel.
self.panel2 = wx.Panel(self, id=3, pos=(200,50), size=(1400,605), style=wx.RAISED_BORDER)
self.panel2.SetBackgroundColour((218, 218, 218))
#Panel 3 contains the pseudo-directory tree.
self.panel3 = wx.Panel(self, id=4, pos=(0,50), size=(200,625), style=wx.RAISED_BORDER)
self.panel3.SetBackgroundColour("white")
self.panel4 = wx.Panel(self, id=5, pos=(200,650), size=(1400,150), style=wx.RAISED_BORDER)
self.panel4.SetBackgroundColour("black")
#These are the other top panels.
self.panel_left = wx.Panel(self, id=12, pos=(0,0), size=(200,45), style=wx.RAISED_BORDER)
self.panel_left.SetBackgroundColour((218, 218, 218))
self.panel_right = wx.Panel(self, id=11, pos=(1100,0), size=(200,45), style=wx.RAISED_BORDER)
self.panel_right.SetBackgroundColour((218, 218, 218))
self.panel_right2 = wx.Panel(self, id=13, pos=(1300,0), size=(300,45), style=wx.RAISED_BORDER)
self.panel_right2.SetBackgroundColour((218, 218, 218))
self.panel_right2.SetMaxSize([300, 45])
#Lines 81-93 set up the user input box for the "sort" function (used on the table).
self.sortbox = wx.TextCtrl(self.panel_right2, id=7, pos=(55,10), size=(40,25))
wx.Button(self.panel_right2, id=8, label="Sort", pos=(5, 12), size=(40, bheight))
self.Bind(wx.EVT_BUTTON, self.SortTablefromButton, id=8)
self.AscendingRadio = wx.RadioButton(self.panel_right2, id=17, label="Sort", pos=(100, 3), size=(12, 12))
self.DescendingRadio = wx.RadioButton(self.panel_right2, id=18, label="Sort", pos=(100, 23), size=(12, 12))
font = wx.Font(10, wx.SWISS, wx.NORMAL, wx.BOLD)
self.AscendingOpt = wx.StaticText(self.panel_right2, label="Ascending", pos=(115, 1))
self.AscendingOpt.SetFont(font)
self.DescendingOpt = wx.StaticText(self.panel_right2, label="Descending", pos=(115, 21))
self.DescendingOpt.SetFont(font)
#Lines 96-98 set up the user input box for the "filter" function (used on the table).
self.filterbox = wx.TextCtrl(self.panel_right, id=9, pos=(60,10), size=(125,25))
wx.Button(self.panel_right, id=10, label="Filter", pos=(0, 12), size=(50, bheight))
self.Bind(wx.EVT_BUTTON, self.FilterTablefromButton, id=10)
#Lines 101-103 set up the in-program log.
self.control = wx.TextCtrl(self.panel4, id=6, pos=(1,1), size=(1400,150), style=wx.TE_MULTILINE)
self.control.write("Welcome to AltAnalyze Results Viewer!" + "\n")
self.Show(True)
self.main_results_directory = ""
#self.browser is the "directory tree" where groups of files are instantiated in self.browser2.
self.browser = wx.TreeCtrl(self.panel3, id=2000, pos=(0,0), size=(200,325))
#self.browser2 is the "file group" where groups of files are accessed, respective to the directory selected in self.browser.
self.browser2 = wx.TreeCtrl(self.panel3, id=2001, pos=(0,325), size=(200,325))
self.tree = self.browser
#self.sortdict maps spreadsheet-style column letters to zero-based column indices; it is used by the sort functions.
self.sortdict = {"A" : 0, "B" : 1, "C" : 2, "D" : 3, "E" : 4, "F" : 5, "G" : 6, "H" : 7, "I" : 8, "J" : 9, "K" : 10, "L" : 11, "M" : 12, "N" : 13, "O" : 14, "P" : 15, "Q" : 16, "R" : 17, "S" : 18, "T" : 19, "U" : 20, "V" : 21, "W" : 22, "X" : 23, "Y" : 24, "Z" : 25, "AA" : 26, "AB" : 27, "AC" : 28, "AD" : 29, "AE" : 30, "AF" : 31, "AG" : 32, "AH" : 33, "AI" : 34, "AJ" : 35, "AK" : 36, "AL" : 37, "AM" : 38, "AN" : 39, "AO" : 40, "AP" : 41, "AQ" : 42, "AR" : 43, "AS" : 44, "AT" : 45, "AU" : 46, "AV" : 47, "AW" : 48, "AX" : 49, "AY" : 50, "AZ" : 51}
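#Editor's note (illustrative alternative, not used by the original code): the same
#letter-to-index mapping can be computed instead of hard-coded, e.g.
# def column_letter_to_index(letters):
#     index = 0
#     for ch in letters.upper():
#         index = index * 26 + (ord(ch) - ord('A') + 1)
#     return index - 1   # 'A' -> 0, 'Z' -> 25, 'AA' -> 26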
#SIZER--main sizer for the program.
ver = wx.BoxSizer(wx.VERTICAL)
verpan2 = wx.BoxSizer(wx.VERTICAL)
hpan1 = wx.BoxSizer(wx.HORIZONTAL)
hpan2 = wx.BoxSizer(wx.HORIZONTAL)
hpan3 = wx.BoxSizer(wx.HORIZONTAL)
verpan2.Add(self.panel2, 8, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_left, 5, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel, 24, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_right, 3, wx.ALL|wx.EXPAND, 2)
hpan1.Add(self.panel_right2, 3, wx.ALL|wx.EXPAND, 2)
hpan2.Add(self.panel3, 1, wx.ALL|wx.EXPAND, 2)
hpan2.Add(verpan2, 7, wx.ALL|wx.EXPAND, 2)
hpan3.Add(self.panel4, 1, wx.ALL|wx.EXPAND, 2)
ver.Add(hpan1, 1, wx.EXPAND)
ver.Add(hpan2, 18, wx.EXPAND)
ver.Add(hpan3, 4, wx.EXPAND)
self.browser.SetSize(self.panel3.GetSize())
self.SetSizer(ver)
#TABS: lines 137-159 instantiate the tabs for the main viewing panel.
self.nb = wx.Notebook(self.panel2, id=7829, style = wx.NB_BOTTOM)
self.page1 = wx.ScrolledWindow(self.nb, wx.ID_ANY, wx.DefaultPosition, wx.DefaultSize, wx.HSCROLL|wx.VSCROLL )
self.page1.SetScrollRate( 5, 5 )
self.page2 = PageTwo(self.nb)
self.page3 = PageThree(self.nb)
self.page4 = PageFour(self.nb)
self.nb.AddPage(self.page2, "Table")
self.nb.AddPage(self.page1, "PNG")
self.nb.AddPage(self.page3, "Interactive")
self.page3.SetBackgroundColour((218, 218, 218))
sizer = wx.BoxSizer()
sizer.Add(self.nb, 1, wx.EXPAND)
self.panel2.SetSizer(sizer)
self.page1.SetBackgroundColour("white")
self.myGrid = gridlib.Grid(self.page2, id=1002)
#self.myGrid.CreateGrid(100, self.dataset_file_length) ### Sets this at 400 columns rather than 100 - Excel like
self.Bind(gridlib.EVT_GRID_CELL_RIGHT_CLICK, self.GridRightClick, id=1002)
self.Bind(gridlib.EVT_GRID_CELL_LEFT_DCLICK, self.GridRowColor, id=1002)
self.HighlightedCells = []
gridsizer = wx.BoxSizer(wx.VERTICAL)
gridsizer.Add(self.myGrid)
self.page2.SetSizer(gridsizer)
self.page2.Layout()
#In the event that the interactive tab is chosen, a function must immediately run.
self.Bind(wx.EVT_NOTEBOOK_PAGE_CHANGED, self.InteractiveTabChoose, id=7829)
#INTERACTIVE PANEL LAYOUT: lines 167-212
#Pca Setup
self.RunButton1 = wx.Button(self.page3, id=43, label="Run", pos=(275, 150), size=(120, bheight))
self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=43)
self.Divider1 = self.ln = wx.StaticLine(self.page3, pos=(5,100))
self.ln.SetSize((415,10))
IntTitleFont = wx.Font(15, wx.SWISS, wx.NORMAL, wx.BOLD)
self.InteractiveTitle = wx.StaticText(self.page3, label="Main Dataset Parameters", pos=(10, 15))
self.InteractiveDefaultMessage = wx.StaticText(self.page3, label="No interactive options available.", pos=(10, 45))
self.InteractiveTitle.SetFont(IntTitleFont)
self.IntFileTxt = wx.TextCtrl(self.page3, id=43, pos=(105,45), size=(375,20))
self.InteractiveFileLabel = wx.StaticText(self.page3, label="Selected File:", pos=(10, 45))
self.Yes1Label = wx.StaticText(self.page3, label="Yes", pos=(305, 80))
self.No1Label = wx.StaticText(self.page3, label="No", pos=(375, 80))
self.D_3DLabel = wx.StaticText(self.page3, label="3D", pos=(305, 120))
self.D_2DLabel = wx.StaticText(self.page3, label="2D", pos=(375, 120))
self.IncludeLabelsRadio = wx.RadioButton(self.page3, id=40, pos=(285, 83), size=(12, 12), style=wx.RB_GROUP)
self.No1Radio = wx.RadioButton(self.page3, id=41, pos=(355, 83), size=(12, 12))
self.IncludeLabelsRadio.SetValue(True)
#self.EnterPCAGenes = wx.TextCtrl(self.page3, id=48, pos=(105,45), size=(375,20))
self.D_3DRadio = wx.RadioButton(self.page3, id=46, pos=(285, 123), size=(12, 12), style=wx.RB_GROUP)
self.D_2DRadio = wx.RadioButton(self.page3, id=47, pos=(355, 123), size=(12, 12))
self.D_3DRadio.SetValue(True)
self.Opt1Desc = wx.StaticText(self.page3, label="Display sample labels next to each object", pos=(10, 80))
self.Opt2Desc = wx.StaticText(self.page3, label="Dimensions to display", pos=(10, 120))
self.IntFileTxt.Hide()
self.InteractiveFileLabel.Hide()
self.Yes1Label.Hide()
self.No1Label.Hide()
self.D_3DLabel.Hide()
self.D_2DLabel.Hide()
self.IncludeLabelsRadio.Hide()
self.No1Radio.Hide()
self.D_3DRadio.Hide()
self.D_2DRadio.Hide()
self.Opt1Desc.Hide()
self.Opt2Desc.Hide()
self.RunButton1.Hide()
self.Divider1.Hide()
#TERMINAL SETUP
TxtBox = wx.BoxSizer(wx.VERTICAL)
TxtBox.Add(self.control, 1, wx.EXPAND)
self.panel4.SetSizer(TxtBox)
self.panel4.Layout()
#SELECTION LIST
self.TopSelectList = []
self.SearchArray = []
self.SearchArrayFiltered = []
self.TopID = ""
self.ColoredCellList = []
#LOGO
self.png = wx.Image("logo.gif", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
self.nb.SetSelection(1)
browspan = wx.BoxSizer(wx.VERTICAL)
browspan.Add(self.browser, 1, wx.EXPAND)
browspan.Add(self.browser2, 1, wx.EXPAND)
self.panel3.SetSizer(browspan)
self.PanelTitle = wx.StaticText(self.panel, label="", pos=(210, 15))
#Open Button
ButtonMan = wx.Button(self.panel_left, id=1001, label="Open Project", pos=(0,0), size=(100,100))
self.Bind(wx.EVT_BUTTON, self.OnOpen, id=1001)
OpenSizer = wx.BoxSizer(wx.HORIZONTAL)
OpenSizer.Add(ButtonMan, 1, wx.EXPAND)
self.panel_left.SetSizer(OpenSizer)
#STATUS BAR CREATE --- not all of these are currently functional. The "edit" menu still needs to be implemented.
status = self.CreateStatusBar()
menubar = wx.MenuBar()
file = wx.Menu()
edit = wx.Menu()
view = wx.Menu()
search = wx.Menu()
filter_table = wx.Menu()
help_menu = wx.Menu()
open_menu = wx.Menu()
open_menu.Append(120, 'Project')
open_menu.Append(121, 'File')
file.AppendMenu(101, '&Open\tCtrl+O', open_menu)
file.Append(102, '&Save\tCtrl+S', 'Save the document')
file.AppendSeparator()
file.Append(103, 'Options', '')
file.AppendSeparator()
quit = wx.MenuItem(file, 105, '&Quit\tCtrl+Q', 'Quit the Application')
file.AppendItem(quit)
edit.Append(109, 'Undo', '')
edit.Append(110, 'Redo', '')
edit.AppendSeparator()
edit.Append(106, '&Cut\tCtrl+X', '')
edit.Append(107, '&Copy\tCtrl+C', '')
edit.Append(108, '&Paste\tCtrl+V', '')
edit.AppendSeparator()
edit.Append(111, '&Select All\tCtrl+A', '')
view.Append(112, '&Clear Panel\tCtrl+.', '')
search.Append(113, 'Tree', '')
search.Append(114, 'Table', '')
filter_table.Append(116, 'Filter', '')
filter_table.Append(117, 'Sort', '')
help_menu.AppendSeparator()
help_menu.Append(139, 'Help', '')
help_menu.Append(140, 'About', '')
menubar.Append(file, "File")
menubar.Append(edit, "Edit")
menubar.Append(view, "View")
menubar.Append(search, "Search")
menubar.Append(filter_table, "Table")
menubar.Append(help_menu, "Help")
self.SetMenuBar(menubar)
#STATUS BAR BINDINGS
self.Bind(wx.EVT_MENU, self.OnOpen, id=120)
self.Bind(wx.EVT_MENU, self.OnOpenSingleFile, id=121)
self.Bind(wx.EVT_MENU, self.OnQuit, id=105)
self.Bind(wx.EVT_MENU, self.ClearVisualPanel, id=112)
self.Bind(wx.EVT_MENU, self.TreeSearch, id=113)
self.Bind(wx.EVT_MENU, self.GridSearch, id=114)
self.Bind(wx.EVT_MENU, self.FilterTable, id=116)
self.Bind(wx.EVT_MENU, self.SortTable, id=117)
self.Bind(wx.EVT_MENU, self.OnAbout, id=140)
self.Bind(wx.EVT_MENU, self.OnHelp, id=139)
self.Layout()
def OnQuit(self, event):
popup = wx.MessageDialog(None, "Are you sure you want to quit?", "Warning", wx.YES_NO)
popup_answer = popup.ShowModal()
#print popup_answer
if(popup_answer == 5103):
self.Close()
else:
return
def GridRowColor(self, event):
#This colors any row that has been selected and resets it accordingly: may be removed in future versions.
if len(self.HighlightedCells) > 0:
for i in self.HighlightedCells:
self.myGrid.SetCellBackgroundColour(i[0], i[1], (255, 255, 255))
self.HighlightedCells = []
self.GridRowEvent = event.GetRow()
for i in range(50):
self.myGrid.SetCellBackgroundColour(self.GridRowEvent, i, (235, 255, 255))
self.HighlightedCells.append((self.GridRowEvent, i))
def GridRightClick(self, event):
#Pop-up menu instantiation for a right click on the table.
self.GridRowEvent = event.GetRow()
# only do this part the first time so the events are only bound once
if not hasattr(self, "popupID3"):
self.popupID1 = wx.NewId()
self.popupID2 = wx.NewId()
if self.analyzeSplicing:
self.popupID3 = wx.NewId()
self.popupID4 = wx.NewId()
self.popupID5 = wx.NewId()
self.Bind(wx.EVT_MENU, self.GeneExpressionSummaryPlot, id=self.popupID1)
self.Bind(wx.EVT_MENU, self.PrintGraphVariables, id=self.popupID2)
if self.analyzeSplicing:
self.Bind(wx.EVT_MENU, self.AltExonViewInitiate, id=self.popupID3)
self.Bind(wx.EVT_MENU, self.IsoformViewInitiate, id=self.popupID4)
self.Bind(wx.EVT_MENU, self.SashimiPlotInitiate, id=self.popupID5)
# build the menu
menu = wx.Menu()
itemOne = menu.Append(self.popupID1, "Gene Plot")
#itemTwo = menu.Append(self.popupID2, "Print Variables")
if self.analyzeSplicing:
itemThree = menu.Append(self.popupID3, "Exon Plot")
itemFour = menu.Append(self.popupID4, "Isoform Plot")
itemFive = menu.Append(self.popupID5, "SashimiPlot")
# show the popup menu
self.PopupMenu(menu)
menu.Destroy()
def AltExonViewInitiate(self, event):
### Temporary option for exon visualization until the main tool is complete and database can be bundled with the program
i=0; values=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and '->' not in val:
geneID_temp = string.split(val,":")[0]
if ('G000' in geneID_temp) and '->' not in geneID_temp:
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
else:
geneID_temp = string.split(val,":")[1]
if ('G000' in geneID_temp):
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
datasetDir = self.main_results_directory
#print datasetDir
self.control.write("Plotting... " + geneID + "\n")
data_type = 'raw expression'
show_introns = 'no'
analysisType = 'graph-plot'
exp_dir = unique.filepath(datasetDir+'/ExpressionInput')
#print exp_dir
exp_file = UI.getValidExpFile(exp_dir)
#print exp_file
UI.altExonViewer(self.species,self.platform,exp_file,geneID,show_introns,analysisType,'')
def IsoformViewInitiate(self, event):
#print os.getcwd()
#This function is a part of the pop-up menu for the table: it plots a gene and protein level view.
os.chdir(parentDirectory)
t = os.getcwd()
#self.control.write(str(os.listdir(t)) + "\n")
gene = self.myGrid.GetCellValue(self.GridRowEvent, 0)
i=0; values=[]; spliced_junctions=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and 'ENSP' not in val and 'ENST' not in val and '->' not in val:
geneID_temp = string.split(val,":")[0]
if ('G000' in geneID_temp) and '->' not in geneID_temp:
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
elif '->' in geneID_temp: pass
else:
geneID_temp = string.split(val,":")[1]
if ('G000' in geneID_temp):
geneID = geneID_temp
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
#print [geneID]
self.control.write("Plotting... " + geneID + "\n")
from visualization_scripts import ExPlot
reload(ExPlot)
ExPlot.remoteGene(geneID,self.species,self.main_results_directory,self.CurrentFile)
#Q = subprocess.Popen(['python', 'ExPlot13.py', str(R)])
#os.chdir(currentDirectory)
def SashimiPlotInitiate(self, event):
#This function is a part of the pop-up menu for the table: it plots a SashimiPlot
datasetDir = str(self.main_results_directory)
geneID = None
#self.control.write(str(os.listdir(t)) + "\n")
i=0; values=[]; spliced_junctions=[]
while i<1000:
try:
val = str(self.myGrid.GetCellValue(self.GridRowEvent, i))
values.append(val)
if ('G000' in val) and ':E' in val:
#if 'ASPIRE' in self.DirFileTxt:
if ':ENS' in val:
val = 'ENS'+string.split(val,':ENS')[1]
val = string.replace(val,'|', ' ')
#Can also refer to MarkerFinder files
if ' ' in val:
if '.' not in string.split(val,' ')[1]:
val = string.split(val,' ')[0] ### get the gene
if 'Combined-junction' in self.DirFileTxt:
if '-' in val and '|' in val:
junctions = string.split(val,'|')[0]
val = 'ENS'+string.split(junctions,'-ENS')[-1]
spliced_junctions.append(val) ### exclusion junction
if 'index' in self.DirFileTxt: ### Splicing-index analysis
spliced_junctions.append(val)
elif '-' in val:
spliced_junctions.append(val) ### junction-level
if ('G000' in val) and geneID == None and '->' not in val:
geneID = string.split(val,":")[0]
if ' ' in geneID:
geneID = string.split(geneID,' ')[0]
i+=1
except Exception: break
if len(spliced_junctions)>0:
spliced_junctions = [spliced_junctions[-1]] ### Select the exclusion junction
else:
spliced_junctions = [geneID]
if 'DATASET' in self.DirFileTxt:
spliced_junctions = [geneID]
from visualization_scripts import SashimiPlot
reload(SashimiPlot)
self.control.write("Attempting to build SashimiPlots for " + str(spliced_junctions[0]) + "\n")
SashimiPlot.remoteSashimiPlot(self.species,datasetDir,datasetDir,None,events=spliced_junctions,show=True) ### assuming the bam files are in the root-dir
def GeneExpressionSummaryPlot(self, event):
#This function is a part of the pop-up menu for the table: it plots expression levels.
Wikipathway_Flag = 0
Protein_Flag = 0
VarGridSet = []
try:
for i in range(3000):
try:
p = self.myGrid.GetCellValue(0, i)
VarGridSet.append(p)
except Exception:
pass
for i in VarGridSet:
y = re.findall("WikiPathways", i)
if len(y) > 0:
Wikipathway_Flag = 1
break
if Wikipathway_Flag == 0:
for i in VarGridSet:
y = re.findall("Select Protein Classes", i)
if len(y) > 0:
Protein_Flag = 1
break
if Protein_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
if Wikipathway_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
q_barrel = []
for i in VariableBox:
q_box = []
q = i
for p in range(500):
if(q < 0):
break
q = q - 1
#Regular expression is needed to find the appropriate columns to match from.
FLAG_log_fold = re.findall("log_fold",VarGridSet[q])
FLAG_adjp = re.findall("adjp",VarGridSet[q])
FLAG_rawp = re.findall("rawp",VarGridSet[q])
FLAG_wiki = re.findall("Wiki",VarGridSet[q])
FLAG_pc = re.findall("Protein Classes",VarGridSet[q])
FLAG_avg = re.findall("avg",VarGridSet[q])
if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0):
break
q_box.append(q)
q_barrel.append((q_box))
Values_List = []
HeaderList = []
TitleList = self.myGrid.GetCellValue(self.GridRowEvent, 0)
for i in VariableBox:
HeaderList.append(self.myGrid.GetCellValue(0, i))
for box in q_barrel:
output_box = []
for value in box:
output_var = self.myGrid.GetCellValue(self.GridRowEvent, value)
output_box.append(float(output_var))
Values_List.append((output_box))
self.control.write("Plotting values from: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n")
Output_Values_List = []
Output_std_err = []
for box in Values_List:
T = 0
for item in box:
T = T + item
output_item = T / float(len(box))
Output_Values_List.append(output_item)
for box in Values_List:
box_std = np.std(box)
box_power = np.power((len(box)), 0.5)
std_err = box_std / float(box_power)
Output_std_err.append(std_err)
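#Editor's note (aside, not part of the original code): the loop above computes the
#standard error of the mean by hand, sem = std / sqrt(n). With SciPy available the
#rough equivalent would be
# from scipy import stats
# Output_std_err = [stats.sem(box) for box in Values_List]
#noting that scipy.stats.sem uses ddof=1 by default while np.std here uses ddof=0.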
n_groups = len(Output_Values_List)
#PLOTTING STARTS --
means_men = Output_Values_List
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
index = np.arange(n_groups)
bar_width = 0.35
pos = bar_width / float(2)
opacity = 0.4
error_config = {'ecolor': '0.3'}
with warnings.catch_warnings():
rects1 = plt.bar((index + pos), Output_Values_List, bar_width,
alpha=opacity,
color='b',
yerr=Output_std_err,
label="")
#plt.title(self.myGrid.GetCellValue(self.GridRowEvent, 2))
plt.title(TitleList)
plt.xticks(index + bar_width, HeaderList)
plt.legend()
plt.tight_layout()
plt.show()
#-- PLOTTING STOPS
except Exception:
self.control.write("Plot failed to output... only applicalbe for the file with prefix DATASET")
def PrintGraphVariables(self, event):
#This function is a part of the pop-up menu for the table: it prints the variables for the expression levels. Used for testing mainly.
Wikipathway_Flag = 0
Protein_Flag = 0
VarGridSet = []
for i in range(100):
p = self.myGrid.GetCellValue(0, i)
VarGridSet.append(p)
for i in VarGridSet:
y = re.findall("WikiPathways", i)
if len(y) > 0:
Wikipathway_Flag = 1
break
if Wikipathway_Flag == 0:
for i in VarGridSet:
y = re.findall("Select Protein Classes", i)
if len(y) > 0:
Protein_Flag = 1
break
if Protein_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
if Wikipathway_Flag == 1:
VariableBox = []
for i in range(len(VarGridSet)):
y = re.findall("avg", VarGridSet[i])
if(len(y) > 0):
VariableBox.append(i)
q_barrel = []
for i in VariableBox:
q_box = []
q = i
for p in range(500):
if(q < 0):
break
q = q - 1
FLAG_log_fold = re.findall("log_fold",VarGridSet[q])
FLAG_adjp = re.findall("adjp",VarGridSet[q])
FLAG_rawp = re.findall("rawp",VarGridSet[q])
FLAG_wiki = re.findall("Wiki",VarGridSet[q])
FLAG_pc = re.findall("Protein Classes",VarGridSet[q])
FLAG_avg = re.findall("avg",VarGridSet[q])
if(len(FLAG_log_fold) > 0 or len(FLAG_adjp) > 0 or len(FLAG_rawp) > 0 or len(FLAG_wiki) > 0 or len(FLAG_pc) > 0 or len(FLAG_avg) > 0):
break
q_box.append(q)
q_barrel.append((q_box))
self.control.write("Selected Row: " + str(self.myGrid.GetCellValue(self.GridRowEvent, 0)) + "\n")
self.control.write("Selected Columns: " + str(q_barrel) + "\n")
Values_List = []
HeaderList = []
for i in VariableBox:
HeaderList.append(self.myGrid.GetCellValue(0, i))
for box in q_barrel:
output_box = []
for value in box:
output_var = self.myGrid.GetCellValue(self.GridRowEvent, value)
output_box.append(float(output_var))
Values_List.append((output_box))
self.control.write("Selected Values: " + str(Values_List) + "\n")
def InteractiveTabChoose(self, event):
#If the interactive tab is chosen, a plot will immediately appear with the default variables.
try:
#The PCA and Heatmap flags are set; a different UI will appear for each of them.
PCA_RegEx = re.findall("PCA", self.DirFile)
Heatmap_RegEx = re.findall("hierarchical", self.DirFile)
if(self.nb.GetSelection() == 2):
if(len(PCA_RegEx) > 0 or len(Heatmap_RegEx) > 0):
self.InteractiveRun(event)
except:
pass
def getDatasetVariables(self):
for file in os.listdir(self.main_results_directory):
if 'AltAnalyze_report' in file and '.log' in file:
log_file = unique.filepath(self.main_results_directory+'/'+file)
log_contents = open(log_file, "rU")
species = ' species: '
platform = ' method: '
for line in log_contents:
line = line.rstrip()
if species in line:
self.species = string.split(line,species)[1]
if platform in line:
self.platform = string.split(line,platform)[1]
try:
self.supported_genesets = UI.listAllGeneSetCategories(self.species,'WikiPathways','gene-mapp')
self.geneset_type = 'WikiPathways'
except Exception:
try:
self.supported_genesets = UI.listAllGeneSetCategories(self.species,'GeneOntology','gene-mapp')
self.geneset_type = 'GeneOntology'
except Exception:
self.supported_genesets = []
self.geneset_type = 'None Selected'
#print 'Using',self.geneset_type, len(self.supported_genesets),'pathways'
break
try:
for file in os.listdir(self.main_results_directory+'/ExpressionOutput'):
if 'DATASET' in file:
dataset_file = unique.filepath(self.main_results_directory+'/ExpressionOutput/'+file)
for line in open(dataset_file,'rU').xreadlines():
self.dataset_file_length = len(string.split(line,'\t'))
break
except Exception:
pass
try:
if self.dataset_file_length<50:
self.dataset_file_length=50
except Exception:
self.dataset_file_length=50
self.myGrid.CreateGrid(100, self.dataset_file_length) ### Re-set the grid width based on the DATASET- file width
def OnOpen(self, event):
#Bound to the open tab from the menu and the "Open Project" button.
openFileDialog = wx.DirDialog(None, "Choose project", "", wx.DD_DEFAULT_STYLE | wx.DD_DIR_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return
#self.input stream is the path of our project's main directory.
self.main_results_directory = openFileDialog.GetPath()
if (len(self.main_results_directory) > 0):
if self.species == '':
self.getDatasetVariables()
self.SearchArray = []
self.SearchArrayFiltered = []
self.control.write("Working..." + "\n")
#FLAG COLLECT
root = 'Data'
for (dirpath, dirnames, filenames) in os.walk(root):
for dirname in dirnames:
#fullpath = os.path.join(dirpath, dirname)
fullpath = currentDirectory+'/'+dirpath+'/'+dirname
for filename in sorted(filenames):
if filename == "location.txt":
#file_fullpath = unique.filepath(os.path.join(dirpath, filename))
file_fullpath = currentDirectory+'/'+dirpath+'/'+filename
file_location = open(file_fullpath, "r")
fl_array = []
for line in file_location:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\r")
if len(line) > 1:
fl_array.append(line[0])
fl_array.append(line[1])
else:
fl_array.append(line[0])
file_location.close()
#if dirname == 'ExonGraph': print fl_array
if(len(fl_array) == 3):
fl_array.append(dirpath)
self.SearchArray.append(fl_array)
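#Editor's note (inferred from the parsing above; the field labels and example
#values are illustrative, not documented in the original source): each
#Config/Data/.../location.txt is expected to provide three fields:
#   ExpressionOutput          <- results sub-folder searched inside the project
#   search:DATASET            <- text after ":" is the filename substring; "*" matches any file
#   ext:.txt,.png             <- text after ":" is a comma-separated list of 4-character extensions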
self.control.write("Opening project at: " + self.main_results_directory + "\n")
self.browser2.DeleteAllItems()
#SEARCH USING FLAGS
count = 0
for FLAG in self.SearchArray:
if((FLAG[0][-1] != "/") and (FLAG[0][-1] != "\\")):
SearchingFlag = FLAG[0] + "/"
SearchingFlag = FLAG[0]
SearchingFlagPath = self.main_results_directory + "/" + SearchingFlag
try:
SFP_Contents = os.listdir(SearchingFlagPath)
for filename in SFP_Contents:
Search_base = FLAG[1]
Search_base = Search_base.split(":")
Search_base = Search_base[1]
Split_Extension = str(FLAG[2])
Split_Extension = Split_Extension.split(":")
S_E = str(Split_Extension[1]).split(",")
GOOD_FLAG = 0
if(Search_base != "*"):
for i in S_E:
if(filename[-4:] == i):
GOOD_FLAG = 1
if(Search_base != "*"):
candidate = re.findall(Search_base, filename)
if(Search_base == "*"):
candidate = "True"
GOOD_FLAG = 1
if (len(Search_base) == 0 or GOOD_FLAG == 0):
continue
if len(candidate) > 0:
self.SearchArrayFiltered.append(FLAG)
except:
continue
count = count + 1
#AVAILABLE DATA SET
try:
shutil.rmtree("AvailableData")
except:
pass
for i in self.SearchArrayFiltered:
AvailablePath = "Available" + i[3]
if '\\' in AvailablePath: ### Windows
AvailablePath = string.replace(AvailablePath,'/','\\')
if '/' in AvailablePath:
Path_List = AvailablePath.split("/")
else:
Path_List = AvailablePath.split("\\")
Created_Directory = ""
for directorynum in range(len(Path_List)):
if directorynum == 0:
Created_Directory = Created_Directory + Path_List[directorynum]
try:
os.mkdir(Created_Directory)
except:
continue
else:
Created_Directory = Created_Directory + "/" + Path_List[directorynum]
try:
os.mkdir(Created_Directory)
except:
continue
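#Editor's note (aside, not part of the original code): the directory-by-directory
#creation above can be collapsed into a single call, e.g.
# try: os.makedirs(AvailablePath)
# except OSError: pass   # path already exists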
#TOP BROWSER SET
root = 'AvailableData'
color_root = [253, 253, 253]
self.tree.DeleteAllItems()
self.ids = {root : self.tree.AddRoot(root)}
self.analyzeSplicing=False
for (dirpath, dirnames, filenames) in os.walk(root):
#print 'x',[dirpath, dirnames, filenames]#;sys.exit()
for dirname in dirnames:
#print dirpath, dirname
if 'Splicing' in dirpath: self.analyzeSplicing=True
fullpath = os.path.join(dirpath, dirname)
#print currentDirectory+'/'+dirpath
self.ids[fullpath] = self.tree.AppendItem(self.ids[dirpath], dirname)
DisplayColor = [255, 255, 255]
DisplayColor[0] = color_root[0] - len(dirpath)
DisplayColor[1] = color_root[1] - len(dirpath)
DisplayColor[2] = color_root[2] - len(dirpath)
self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor)
for i in self.SearchArrayFiltered:
SearchRoot = "Available" + i[3]
if(SearchRoot == fullpath):
SearchSplit = i[1].split(":")
SearchSplit = SearchSplit[1]
SearchSplit = SearchSplit + ";" + i[0]
SearchSplit = SearchSplit + ";" + i[2]
DisplayColor = [130, 170, 250]
self.tree.SetItemData(self.ids[fullpath],wx.TreeItemData(SearchSplit))
self.tree.SetItemBackgroundColour(self.ids[fullpath], DisplayColor)
self.tree.SetItemBackgroundColour(self.ids[root], [100, 140, 240])
self.tree.Expand(self.ids[root])
try: self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedTopTreeID, self.tree)
except Exception: pass
#OPENING DISPLAY
try:
self.LOGO.Destroy()
except:
pass
self.png = wx.Image(rootDirectory+"/Config/no-image-available.png", wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
self.control.write("Resetting grid..." + "\n")
self.control.write("Currently displaying: " + "SUMMARY" + "\n")
self.myGrid.ClearGrid()
if 'ExpressionInput' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExpressionInput')[0]
if 'AltResults' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltResults')[0]
if 'ExpressionOutput' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExpressionOutput')[0]
if 'GO-Elite' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'GO-Elite')[0]
if 'ICGS' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ICGS')[0]
if 'DataPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'DataPlots')[0]
if 'AltExpression' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltExpression')[0]
if 'AltDatabase' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'AltDatabase')[0]
if 'ExonPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'ExonPlots')[0]
if 'SashimiPlots' in self.main_results_directory:
self.main_results_directory = string.split(self.main_results_directory,'SashimiPlots')[0]
opening_display_folder = self.main_results_directory + "/ExpressionOutput"
try:
list_contents = os.listdir(opening_display_folder)
target_file = ""
for file in list_contents:
candidate = re.findall("SUMMARY", file)
if len(candidate) > 0:
target_file = file
break
except Exception:
opening_display_folder = self.main_results_directory
list_contents = os.listdir(opening_display_folder)
for file in list_contents:
candidate = re.findall(".log", file)
if len(candidate) > 0:
target_file = file ### get the last log file
target_file = unique.filepath(opening_display_folder + "/" + target_file)
opened_target_file = open(target_file, "r")
opened_target_file_contents = []
for line in opened_target_file:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if len(line)==1: line += ['']*5
opened_target_file_contents.append((line))
self.table_length = len(opened_target_file_contents)
for cell in self.ColoredCellList:
try: self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
except Exception: pass
self.ColoredCellList = []
x_count = 0
for item_list in opened_target_file_contents:
y_count = 0
for item in item_list:
try:
self.myGrid.SetCellValue(x_count, y_count, item)
except Exception:
pass ### if the length of the row is 0
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
#This line always sets the opening display to the "Table" tab.
self.nb.SetSelection(0)
def OnOpenSingleFile(self, event):
#Opens only one file as opposed to the whole project; possibly unstable and needs further testing.
openFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_OPEN | wx.FD_FILE_MUST_EXIST)
if openFileDialog.ShowModal() == wx.ID_CANCEL:
return
single_input_stream = openFileDialog.GetPath()
self.control.write(str(single_input_stream) + "\n")
if single_input_stream[-4:] == ".txt":
self.myGrid.ClearGrid()
self.DirFileTxt = single_input_stream
self.DirFile = single_input_stream
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
for line in table_file:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(len(table_file_contents) >= 5000):
break
table_file_contents.append((line))
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
try:
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
except Exception: pass
self.page2.Layout()
if single_input_stream[-4:] == ".png":
self.myGrid.ClearGrid()
try:
self.LOGO.Destroy()
except:
pass
self.png = wx.Image(single_input_stream, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
if single_input_stream[-4:] == ".pdf":
#http://wxpython.org/Phoenix/docs/html/lib.pdfviewer.html
pass
def OnSave(self, event):
#Save function is currently not implemented but is a priority for future updates.
saveFileDialog = wx.FileDialog(self, "", "", "", "", wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT)
if saveFileDialog.ShowModal() == wx.ID_CANCEL:
return
def OnSearch(self, event):
#This handles the search prompt pop-up box when using "search -> table" from the status bar menu.
popup = wx.TextEntryDialog(None, "Enter filter for results.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
answer=popup.GetValue()
popup.Destroy()
else:
popup.Destroy()
return
def TreeSearch(self, event):
#Search tree function: searches the tree for a given phrase and opens the tree to that object.
popup = wx.TextEntryDialog(None, "Search the browser tree for directories and files.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
answer=popup.GetValue()
self.control.write("K" + str(answer) + "\n")
os.chdir(currentDirectory) ### NS-91615 alternative to __file__
rootman = "AvailableData"
search_box = []
found = ""
for (dirpath, dirnames, filenames) in os.walk(rootman):
for dirname in dirnames:
fullpath = dirpath + "/" + dirname
search_box.append(fullpath)
self.control.write("Searching..." + "\n")
for path in search_box:
path2 = path.split("/")
search_candidate = path2[-1]
self.control.write(search_candidate + " " + str(answer) + "\n")
if(str(answer) == search_candidate):
found = path
break
self.control.write(found + "\n")
tree_recreate = found.split("/")
treepath = ""
self.control.write(str(range(len(tree_recreate))) + "\n")
tree_length = len(tree_recreate)
last_tree_value = len(tree_recreate) - 1
for i in range(tree_length):
self.control.write(str(i) + "\n")
if(i == 0):
self.tree.Expand(self.ids[tree_recreate[i]])
treepath = treepath + tree_recreate[i]
self.control.write(treepath + "\n")
if(i > 0 and i < last_tree_value):
treepath = treepath + "/" + tree_recreate[i]
self.control.write(treepath + "\n")
self.tree.Expand(self.ids[treepath])
if(i == last_tree_value):
treepath = treepath + "/" + tree_recreate[i]
self.control.write(treepath + "\n")
self.tree.SelectItem(self.ids[treepath])
popup.Destroy()
else:
popup.Destroy()
return
def GridSearch(self, event):
#Search table function: this searchs the table and highlights the search query in the table; also zooms to the nearest match.
popup = wx.TextEntryDialog(None, "Search the table.", "Search", "Enter search here.")
if popup.ShowModal()==wx.ID_OK:
PageDownFound = "False"
match_count = 0
answer=popup.GetValue()
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
if(self.table_length > 5100):
y_range = range(5100)
y_range = range(self.table_length)
x_range = range(100)
y_count = 0
for number in y_range:
x_count = 0
for number in x_range:
cellvalue = self.myGrid.GetCellValue(y_count, x_count)
gridmatch = re.findall(answer, cellvalue)
if(len(gridmatch) > 0):
if(PageDownFound == "False"):
PageScrollY = y_count
PageScrollX = x_count
PageDownFound = "True"
match_count = match_count + 1
self.ColoredCellList.append((y_count, x_count))
self.myGrid.SetCellBackgroundColour(y_count, x_count, (255, 255, 125))
x_count = x_count + 1
y_count = y_count + 1
#"MakeCellVisible" zooms to the given coordinates.
self.myGrid.MakeCellVisible(PageScrollY, PageScrollX)
terminal_list = []
for cell in self.ColoredCellList:
newrow = cell[0] + 1
newcolumn = cell[1] + 1
terminal_list.append((newrow, newcolumn))
self.control.write(str(match_count) + " matches found for " + answer + "\n")
self.control.write("At positions (row, column): " + str(terminal_list) + "\n")
popup.Destroy()
self.nb.SetSelection(0)
else:
popup.Destroy()
return
def FilterTable(self, event):
#The filter function displays ONLY the rows that have matches for the given search. Does not delete the filtered out data---table data is still fully functional and usable.
popup = wx.TextEntryDialog(None, "Filter the table.", "Search", "Enter filter phrase.")
if popup.ShowModal()==wx.ID_OK:
self.myGrid.ClearGrid()
answer=popup.GetValue()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
count = 0
for line in table_file:
line = line.rstrip(); line = string.replace(line,'"','')
regex_test = re.findall(answer.upper(), line.upper())
line = line.split("\t")
if(len(regex_test) > 0 or count == 0):
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((line))
count = count + 1
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to open txt." + "\n")
self.nb.SetSelection(0)
def SortTable(self, event):
#The sort function re-writes the table sorting by either descending or ascending values in a given column.
popup = wx.TextEntryDialog(None, "Sort the table.", "Sort", "Which column to sort from?")
if popup.ShowModal()==wx.ID_OK:
self.myGrid.ClearGrid()
answer=popup.GetValue()
answer = answer.upper()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
for line in table_file:
line=string.replace(line,'Insufficient Expression','0')
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[self.sortdict[answer]]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == self.sortdict[answer]):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
pre_sort2 = sorted(pre_sort2, reverse = True)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == self.sortdict[answer]):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != self.sortdict[answer]):
if(final_count1 < self.sortdict[answer]):
send_list.append(line[final_count2])
if(final_count1 > self.sortdict[answer]):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
n_table_file_contents = []
if(answer.upper() == "A"):
for i in range(len(table_file_contents)):
if(i == 0):
max_length = len(table_file_contents[i])
if(max_length < len(table_file_contents[i])):
n_l = table_file_contents[i][2:]
else:
n_l = table_file_contents[i]
n_table_file_contents.append((n_l))
table_file_contents = n_table_file_contents
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
def FilterTablefromButton(self, event):
#Same filter function as before, but this function is bound to the button in the top-right corner of the main GUI.
self.myGrid.ClearGrid()
#For single-line text boxes, always pass 0 to GetLineText; 0 refers to the first and only line.
answer = self.filterbox.GetLineText(0)
try:
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
table_file_contents = []
count = 0
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
regex_test = re.findall(answer.upper(), line.upper())
line = line.split("\t")
if(len(regex_test) > 0 or count == 0):
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((line))
count = count + 1
self.table_length = len(table_file_contents)
self.control.write("Table Length: " + str(self.table_length) + "\n")
if(self.table_length > 100):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to open txt." + "\n")
self.nb.SetSelection(0)
def SortTablefromButton(self, event):
#Same sort function as before, but this function is bound to the button in the top-right corner of the main GUI.
answer = self.sortbox.GetLineText(0)
self.myGrid.ClearGrid()
answer = answer.upper()
try:
table_file = open(self.DirFileTxt, "r")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
for line in table_file:
line=string.replace(line,'Insufficient Expression','0')
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[self.sortdict[answer]]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == self.sortdict[answer]):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
if(self.DescendingRadio.GetValue() == True):
pre_sort2 = sorted(pre_sort2, reverse = True)
if(self.AscendingRadio.GetValue() == True):
pre_sort2 = sorted(pre_sort2)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == self.sortdict[answer]):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != self.sortdict[answer]):
if(final_count1 < self.sortdict[answer]):
send_list.append(line[final_count2])
if(final_count1 > self.sortdict[answer]):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
n_table_file_contents = []
if(answer.upper() == "A"):
for i in range(len(table_file_contents)):
if(i == 0):
max_length = len(table_file_contents[i])
if(max_length < len(table_file_contents[i])):
n_l = table_file_contents[i][2:]
else:
n_l = table_file_contents[i]
n_table_file_contents.append((n_l))
table_file_contents = n_table_file_contents
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
def SelectedTopTreeID(self, event):
item = event.GetItem()
try:
#This handles the selection of an item in the TOP tree browser.
item = event.GetItem()
itemObject = self.tree.GetItemData(item).GetData()
SearchObject = itemObject.split(";")
SearchSuffix = SearchObject[0]
SearchPath = SearchObject[1]
SearchExtension = SearchObject[2]
SearchExtension = SearchExtension.split(":")
SearchExtension = SearchExtension[1:]
SearchExtension = SearchExtension[0]
SearchExtension = SearchExtension.split(",")
#SELECTION IMPLEMENT
ID_Strings = []
self.TopSelectList = []
self.TopID = SearchSuffix
root = self.main_results_directory + "/" + SearchPath
root_display = self.main_results_directory + "/" + SearchPath
root_contents = os.listdir(root)
root_contents_display = os.listdir(root)
for obj in root_contents:
if(SearchSuffix != "*"):
FindList = re.findall(SearchSuffix, obj)
if(len(FindList) > 0):
self.TopSelectList.append(obj)
#print obj
self.browser2.DeleteAllItems()
for filename in root_contents:
if(SearchSuffix != "*"):
FindList2 = re.findall(SearchSuffix, filename)
if(len(FindList2) > 0):
display_name = filename[0:-4]
ID_Strings.append(display_name)
else:
if(filename[-4] == "."):
display_name = filename[0:-4]
if "AVERAGE-" not in display_name and "COUNTS-" not in display_name:
ID_Strings.append(display_name)
ID_Strings = list(set(ID_Strings))
change_path = currentDirectory + "/UseDir" ### NS-91615 alternative to __file__
shutil.rmtree("UseDir")
os.mkdir("UseDir")
#self.control.write(ID_Strings[0] + "\n")
os.chdir(change_path)
for marker in ID_Strings:
try:
os.mkdir(marker)
except:
pass
os.chdir(currentDirectory) ### NS-91615 alternative to __file__
root = "UseDir"
color_root2 = [223, 250, 223]
self.ids2 = {root : self.browser2.AddRoot(root)}
for (dirpath, dirnames, filenames) in os.walk(root):
color_root2[0] = color_root2[0] - 1
color_root2[1] = color_root2[1] - 0
color_root2[2] = color_root2[2] - 1
for dirname in dirnames:
#self.control.write(str(SearchExtension) + "\n")
Extensions = dirname + "|" + str(SearchExtension) + "|" + str(SearchPath)
fullpath = os.path.join(dirpath, dirname)
self.ids2[fullpath] = self.browser2.AppendItem(self.ids2[dirpath], dirname)
self.browser2.SetItemData(self.ids2[fullpath],wx.TreeItemData(Extensions))
T = re.findall("DATASET", fullpath)
if(len(T) > 0):
self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [250, 100, 100])
else:
self.browser2.SetItemBackgroundColour(self.ids2[fullpath], [130, 170, 250])
self.browser2.SetItemBackgroundColour(self.ids2[root], [110, 150, 250])
self.Bind(wx.EVT_TREE_ITEM_ACTIVATED, self.SelectedBottomTreeID, self.browser2)
self.browser2.ExpandAll()
#OPENING DISPLAY
display_file_selected = ""
TXT_FLAG = 0
PNG_FLAG = 0
if(root_display[-1] != "/"):
root_display = root_display + "/"
for possible in root_contents_display:
total_filepath = unique.filepath(root_display + possible)
if(possible[-4:] == ".txt"):
self.control.write("Displaying File: " + str(total_filepath) + "\n")
display_file_selected = total_filepath
break
TXT_FLAG = 0
PNG_FLAG = 0
#self.control.write(str(os.listdir(root)) + "\n")
#self.control.write(str(SearchExtension) + "\n")
for i in SearchExtension:
if(i == ".txt"):
TXT_FLAG = 1
#self.control.write(str(i) + "\n")
if(i == ".png"):
PNG_FLAG = 1
#self.control.write(str(i) + "\n")
if(root_display[-1] != "/"):
root_display = root_display + "/"
Pitch = os.listdir(root)
PitchSelect = Pitch[0]
self.CurrentFile = PitchSelect
#self.control.write(str(PitchSelect) + " " + root_display + "\n")
self.DirFile = unique.filepath(root_display + PitchSelect)
self.IntFileTxt.Clear()
self.IntFileTxt.write(self.DirFile)
self.DirFileTxt = unique.filepath(root_display + PitchSelect + ".txt")
DirFilePng = unique.filepath(root_display + PitchSelect + ".png")
self.myGrid.ClearGrid()
title_name = PitchSelect
try:
self.LOGO.Destroy()
except:
pass
try:
self.PanelTitle.Destroy()
except:
pass
font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD)
self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7))
self.PanelTitle.SetFont(font)
if(TXT_FLAG == 1):
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
try:
#First time the DATASET file is imported
#font = wx.Font(16, wx.DECORATIVE, wx.BOLD, wx.NORMAL)
#self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(210, 15))
#self.PanelTitle.SetFont(font)
#table_file = open(self.DirFileTxt, "rU")
table_file_contents = []
column_lengths = []
count=0
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
column_lengths.append(len(line))
table_file_contents.append((line))
if count>2000: break
count+=1
self.max_column_length = max(column_lengths)
self.table_length = len(table_file_contents)
if(self.table_length > 100 and self.table_length < 5000):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
if(self.table_length >= 5000):
self.AppendTotal = 5000
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
try:
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
except:
pass
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
TXT_FLAG = 0
self.control.write("Unable to open txt." + "\n")
try:
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
pass
if(PNG_FLAG == 1):
try:
open(DirFilePng, "r")
self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
except:
PNG_FLAG = 0
self.control.write("Unable to open png." + "\n")
try:
self.root_widget_id = 500
self.root_widget_text = 550
for i in range(self.root_widget_id, self.root_widget_end):
self.heatmap_ids[i].Destroy()
for i in range(self.root_widget_text, self.rwtend):
self.heatmap_ids[i].Destroy()
self.RunButton2.Destroy()
except:
pass
self.InteractivePanelUpdate(event)
if(PNG_FLAG == 1 and TXT_FLAG == 0):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
if(PNG_FLAG == 0 and TXT_FLAG == 1):
self.nb.SetSelection(0)
if(PNG_FLAG == 1 and TXT_FLAG == 1):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
except Exception: pass
def SelectedBottomTreeID(self, event):
#This handles the selection of an item in the BOTTOM tree browser; represents a file most of the time.
item = event.GetItem()
itemObject = self.browser2.GetItemData(item).GetData()
Parameters = itemObject.split("|")
file_extension = Parameters[1][1:-1]
file_extension.replace("'", "")
file_extension = file_extension.split(",")
file_exts = []
TXT_FLAG = 0
PNG_FLAG = 0
for i in file_extension:
i = i.replace("'", "")
i = i.replace(" ", "")
file_exts.append(i)
for i in file_exts:
if(i == ".txt"):
TXT_FLAG = 1
if(i == ".png"):
PNG_FLAG = 1
DirPath = self.main_results_directory + "/" + Parameters[2]
if(DirPath[-1] != "/"):
DirPath = DirPath + "/"
DirFile = DirPath + Parameters[0]
self.CurrentFile = DirFile
self.control.write("Displaying file: " + DirFile + "\n")
title_name = DirFile.split("/")
title_name = title_name[-1]
self.DirFile = unique.filepath(DirFile)
self.IntFileTxt.Clear()
self.IntFileTxt.write(self.DirFile)
self.DirFileTxt = DirFile + ".txt"
DirFilePng = DirFile + ".png"
self.myGrid.ClearGrid()
try:
self.LOGO.Destroy()
except:
pass
try:
self.PanelTitle.Destroy()
except:
pass
font = wx.Font(16, wx.SWISS, wx.NORMAL, wx.BOLD)
self.PanelTitle = wx.StaticText(self.panel, label=title_name, pos=(5, 7))
self.PanelTitle.SetFont(font)
#PNG_FLAG and TXT_FLAG are flags that sense the presence of an image or text file.
if(PNG_FLAG == 1):
try:
open(DirFilePng, "r")
self.png = wx.Image(DirFilePng, wx.BITMAP_TYPE_ANY).ConvertToBitmap()
self.LOGO = wx.StaticBitmap(self.page1, -1, self.png, (0,0), (self.png.GetWidth(), self.png.GetHeight()), style=wx.ALIGN_CENTER)
imgsizer_v = wx.BoxSizer(wx.VERTICAL)
imgsizer_v.Add(self.LOGO, proportion=0, flag=wx.ALIGN_CENTER_HORIZONTAL|wx.ALIGN_CENTER_VERTICAL)
self.page1.SetSizer(imgsizer_v)
self.page1.Layout()
except:
PNG_FLAG = 0
self.control.write("Unable to open png." + "\n")
if(TXT_FLAG == 1):
try:
self.myGrid.DeleteRows(100, self.AppendTotal, True)
except:
pass
try:
count=0
#table_file = open(self.DirFileTxt, "r")
table_file_contents = []
column_lengths = []
for line in open(self.DirFileTxt,'rU').xreadlines():
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
column_lengths.append(len(line))
table_file_contents.append((line))
count+=1
if count>2000:break
self.max_column_length = max(column_lengths)
self.table_length = len(table_file_contents)
if(self.table_length > 100 and self.table_length < 5000):
self.AppendTotal = self.table_length - 100
self.myGrid.AppendRows(self.AppendTotal, True)
if(self.table_length >= 5000):
self.AppendTotal = 5000
self.myGrid.AppendRows(self.AppendTotal, True)
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
for item_list in table_file_contents:
y_count = 0
for item in item_list:
try: self.myGrid.SetCellValue(x_count, y_count, item) ###Here
except Exception:
### Unclear why this is throwing an error
#print traceback.format_exc()
#print x_count, y_count, item;sys.exit()
pass
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
print traceback.format_exc()
TXT_FLAG = 0
self.control.write("Unable to open txt." + "\n")
DATASET_FIND_FLAG = re.findall("DATASET", self.DirFileTxt)
count=0
if(len(DATASET_FIND_FLAG) > 0):
try:
#table_file = open(self.DirFileTxt, "rU")
table_file_contents = []
pre_sort2 = []
header = []
t_c = 0
column_clusters_flat = 0
answer = "AC"
for line in open(self.DirFileTxt,'rU').xreadlines():
#for line in table_file:
count+=1
if count>2000:
break
try:
line = line.rstrip(); line = string.replace(line,'"','')
line = line.split("\t")
if(t_c == 0):
header.append((line))
t_c = t_c + 1
index=0
for i in line:
if 'ANOVA-rawp' in i: answer = index
index+=1
continue
if(line[0] == "column_clusters-flat"):
header.append((line))
column_clusters_flat = 1
continue
line_sort_select = line[answer]
pre_sort1 = []
count = 0
for i in line:
if(count == 0):
try:
pre_sort1.append(float(line_sort_select))
except:
pre_sort1.append(line_sort_select)
pre_sort1.append(i)
if(count == answer):
count = count + 1
continue
if(count != 0):
pre_sort1.append(i)
count = count + 1
pre_sort2.append((pre_sort1))
except:
continue
table_file_contents.append(header[0])
if(column_clusters_flat == 1):
table_file_contents.append(header[1])
pre_sort2 = sorted(pre_sort2)
for line in pre_sort2:
try:
final_count1 = 0
final_count2 = 1
send_list = []
for item in line:
if(final_count1 == 0):
send_list.append(line[final_count2])
if(final_count1 == answer):
send_list.append(str(line[0]))
if(final_count1 != 0 and final_count1 != answer):
if(final_count1 < answer):
send_list.append(line[final_count2])
if(final_count1 > answer):
send_list.append(line[final_count1])
final_count1 = final_count1 + 1
final_count2 = final_count2 + 1
if(len(table_file_contents) >= 5100):
break
table_file_contents.append((send_list))
except:
continue
for cell in self.ColoredCellList:
self.myGrid.SetCellBackgroundColour(cell[0], cell[1], wx.WHITE)
self.ColoredCellList = []
x_count = 0
try:
for item_list in table_file_contents:
y_count = 0
for item in item_list:
self.myGrid.SetCellValue(x_count, y_count, item)
if(x_count == 0):
self.myGrid.SetCellFont(x_count, y_count, wx.Font(12, wx.SWISS, wx.NORMAL, wx.BOLD))
y_count = y_count + 1
x_count = x_count + 1
except:
pass
self.myGrid.AutoSizeRows(True)
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
self.control.write("Unable to sort." + "\n")
self.nb.SetSelection(0)
self.InteractivePanelUpdate(event)
try:
self.myGrid.AutoSize()
for i in range(50):
colsize = self.myGrid.GetColSize(i)
if(colsize > 200):
self.myGrid.SetColSize(i, 200)
self.page2.Layout()
except:
pass
if(PNG_FLAG == 1 and TXT_FLAG == 0):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
if(PNG_FLAG == 0 and TXT_FLAG == 1):
self.nb.SetSelection(0)
if(PNG_FLAG == 1 and TXT_FLAG == 1):
self.nb.SetSelection(1)
self.Layout()
self.page1.Layout()
def InteractivePanelUpdate(self, event):
#Both the PCA UI and Heatmap UI share the same panel, so buttons and text boxes (as well as other GUI elements) will have to be destroyed/hidden
#whenever a new type of interactivity is selected.
self.IntFileTxt.Hide()
self.InteractiveFileLabel.Hide()
self.Yes1Label.Hide()
self.No1Label.Hide()
self.D_3DLabel.Hide()
self.D_2DLabel.Hide()
self.IncludeLabelsRadio.Hide()
self.No1Radio.Hide()
self.D_3DRadio.Hide()
self.D_2DRadio.Hide()
self.Opt1Desc.Hide()
self.Opt2Desc.Hide()
self.RunButton1.Hide()
self.Divider1.Hide()
self.InteractiveDefaultMessage.Hide()
try:
self.root_widget_id = 500
self.root_widget_text = 550
for i in range(self.root_widget_id, self.root_widget_end):
self.heatmap_ids[i].Destroy()
for i in range(self.root_widget_text, self.rwtend):
self.heatmap_ids[i].Destroy()
self.RunButton2.Destroy()
except:
pass
PCA_RegEx = re.findall("PCA", self.DirFile)
if(len(PCA_RegEx) > 0):
self.IntFileTxt.Show()
self.InteractiveFileLabel.Show()
self.Yes1Label.Show()
self.No1Label.Show()
self.D_3DLabel.Show()
self.D_2DLabel.Show()
self.IncludeLabelsRadio.Show()
self.No1Radio.Show()
self.D_3DRadio.Show()
self.D_2DRadio.Show()
self.Opt1Desc.Show()
self.Opt2Desc.Show()
self.RunButton1.Show()
self.Divider1.Show()
Heatmap_RegEx = re.findall("hierarchical", self.DirFile)
if(len(Heatmap_RegEx) > 0):
#Heatmap Setup
os.chdir(parentDirectory)
options_open = open(unique.filepath(currentDirectory+"/options.txt"), "rU")
heatmap_array = []
self.heatmap_ids = {}
self.heatmap_translation = {}
supported_geneset_types = UI.getSupportedGeneSetTypes(self.species,'gene-mapp')
supported_geneset_types += UI.getSupportedGeneSetTypes(self.species,'gene-go')
supported_geneset_types_alt = [self.geneset_type]
supported_genesets = self.supported_genesets
for line in options_open:
line = line.split("\t")
variable_name,displayed_title,display_object,group,notes,description,global_default,options = line[:8]
options = string.split(options,'|')
if(group == "heatmap"):
if(display_object == "file"):
continue
od = UI.OptionData(variable_name,displayed_title,display_object,notes,options,global_default)
od.setDefaultOption(global_default)
#"""
if variable_name == 'ClusterGOElite':
od.setArrayOptions(['None Selected','all']+supported_geneset_types)
elif variable_name == 'GeneSetSelection':
od.setArrayOptions(['None Selected']+supported_geneset_types_alt)
elif variable_name == 'PathwaySelection':
od.setArrayOptions(['None Selected']+supported_genesets)
elif od.DefaultOption() == '':
od.setDefaultOption(od.Options()[0])
if od.DefaultOption() == '---':
od.setDefaultOption('')#"""
heatmap_array.append(od)
#heatmap_array.append((line[1], line[2], line[7], line[6]))
os.chdir(currentDirectory)
root_widget_y_pos = 45
self.root_widget_id = 500
self.root_widget_text = 550
for od in heatmap_array:
#od.VariableName()
id = wx.NewId()
#print od.VariableName(),od.Options()
self.heatmap_translation[od.VariableName()] = self.root_widget_id
self.heatmap_ids[self.root_widget_text] = wx.StaticText(self.page3, self.root_widget_text, label=od.Display(), pos=(150, root_widget_y_pos))
if(od.DisplayObject() == "comboBox" or od.DisplayObject() == "multiple-comboBox"):
self.heatmap_ids[self.root_widget_id] = wx.ComboBox(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25), od.Options(), wx.CB_DROPDOWN)
else:
self.heatmap_ids[self.root_widget_id] = wx.TextCtrl(self.page3, self.root_widget_id, od.DefaultOption(), (10, root_widget_y_pos), (120,25))
self.root_widget_id = self.root_widget_id + 1
self.root_widget_text = self.root_widget_text + 1
root_widget_y_pos = root_widget_y_pos + 25
self.rwtend = self.root_widget_text
self.root_widget_end = self.root_widget_id
self.RunButton2 = wx.Button(self.page3, id=599, label="Run", pos=(175, (self.root_widget_end + 10)), size=(120, bheight))
self.Bind(wx.EVT_BUTTON, self.InteractiveRun, id=599)
if(len(PCA_RegEx) == 0 and len(Heatmap_RegEx) == 0):
self.InteractiveDefaultMessage.Show()
def ClearVisualPanel(self, event):
#Deletes the current image on the viewing panel. Unstable and mostly broken; may be removed in future versions.
popup = wx.MessageDialog(None, "Are you sure you want to clear the visual panel?", "Warning", wx.YES_NO)
popup_answer = popup.ShowModal()
if(popup_answer == 5103):
try:
self.LOGO.Destroy()
self.panel2.Layout()
except:
pass
try:
self.myGrid.ClearGrid()
self.panel2.Layout()
except:
pass
popup.Destroy()
self.control.write("Visual panel cleared." + "\n")
else:
return
def InteractiveRun(self, event):
#This function is bound to the "Run" button on the interactive tab GUI. Generates an interactive plot.
#Updates to this panel are currently a priority, so many changes may come to it.
RegExHeat = re.findall("hierarchical", self.DirFile)
if(len(RegExHeat) > 0):
for VariableName in self.heatmap_translation:
#self.control.write(str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue()) + " " + str(VariableName) + " " + str(self.heatmap_ids[self.heatmap_translation[VariableName]]) + "\n")
try:
self.heatmap_translation[VariableName] = str(self.heatmap_ids[self.heatmap_translation[VariableName]].GetValue())
#print self.heatmap_translation[VariableName]
except Exception: pass
try:
#self.control.write(self.DirFile + "\n")
input_file_dir = self.DirFile + ".txt"
column_metric = self.heatmap_translation['column_metric']; #self.control.write(column_metric + "\n")
column_method = self.heatmap_translation['column_method']; #self.control.write(column_method + "\n")
row_metric = self.heatmap_translation['row_metric']; #self.control.write(row_metric + "\n")
row_method = self.heatmap_translation['row_method']; #self.control.write(row_method+ "\n")
color_gradient = self.heatmap_translation['color_selection']; #self.control.write(color_gradient + "\n")
cluster_rows = self.heatmap_translation['cluster_rows']; #self.control.write(cluster_rows + "\n")
cluster_columns = self.heatmap_translation['cluster_columns']; #self.control.write(cluster_columns + "\n")
normalization = self.heatmap_translation['normalization']; #self.control.write(normalization + "\n")
contrast = self.heatmap_translation['contrast']; #self.control.write(contrast + "\n")
transpose = self.heatmap_translation['transpose']; #self.control.write(transpose + "\n")
GeneSetSelection = self.heatmap_translation['GeneSetSelection']; #self.control.write(GeneSetSelection + "\n")
PathwaySelection = self.heatmap_translation['PathwaySelection']; #self.control.write(PathwaySelection + "\n")
OntologyID = self.heatmap_translation['OntologyID']; #self.control.write(OntologyID + "\n")
GeneSelection = self.heatmap_translation['GeneSelection']; #self.control.write(GeneSelection + "\n")
justShowTheseIDs = self.heatmap_translation['JustShowTheseIDs']; #self.control.write(JustShowTheseIDs + "\n")
HeatmapAdvanced = self.heatmap_translation['HeatmapAdvanced']; #self.control.write(HeatmapAdvanced + "\n")
clusterGOElite = self.heatmap_translation['ClusterGOElite']; #self.control.write(ClusterGOElite + "\n")
heatmapGeneSets = self.heatmap_translation['heatmapGeneSets']; #self.control.write(heatmapGeneSets + "\n")
if cluster_rows == 'no': row_method = None
if cluster_columns == 'no': column_method = None
HeatmapAdvanced = (HeatmapAdvanced,)
#print ['JustShowTheseIDs',justShowTheseIDs]
if self.DirFile not in self.heatmap_run:
self.heatmap_run[self.DirFile]=None
### occurs when automatically running the heatmap
column_method = None
row_method = None
color_gradient = 'yellow_black_blue'
normalization = 'median'
translate={'None Selected':'','Exclude Cell Cycle Effects':'excludeCellCycle','Top Correlated Only':'top','Positive Correlations Only':'positive','Perform Iterative Discovery':'driver', 'Intra-Correlated Only':'IntraCorrelatedOnly', 'Perform Monocle':'monocle'}
try:
if 'None Selected' in HeatmapAdvanced: ('None Selected')
except Exception: HeatmapAdvanced = ('None Selected')
if ('None Selected' in HeatmapAdvanced and len(HeatmapAdvanced)==1) or 'None Selected' == HeatmapAdvanced: pass
else:
#print HeatmapAdvanced,'kill'
try:
GeneSelection += ' '+string.join(list(HeatmapAdvanced),' ')
for name in translate:
GeneSelection = string.replace(GeneSelection,name,translate[name])
GeneSelection = string.replace(GeneSelection,' ',' ')
if 'top' in GeneSelection or 'driver' in GeneSelection or 'excludeCellCycle' in GeneSelection or 'positive' in GeneSelection or 'IntraCorrelatedOnly' in GeneSelection:
GeneSelection+=' amplify'
except Exception: pass
GeneSetSelection = string.replace(GeneSetSelection,'\n',' ')
GeneSetSelection = string.replace(GeneSetSelection,'\r',' ')
if justShowTheseIDs == '': justShowTheseIDs = 'None Selected'
if GeneSetSelection== '': GeneSetSelection = 'None Selected'
if PathwaySelection== '': PathwaySelection = 'None Selected'
try: rho = float(self.heatmap_translation['CorrelationCutoff'])
except Exception: rho=None
if transpose == 'yes': transpose = True
else: transpose = False
vendor = 'RNASeq'
color_gradient = string.replace(color_gradient,'-','_')
if GeneSetSelection != 'None Selected' or GeneSelection != '' or normalization != 'NA' or justShowTheseIDs != '' or justShowTheseIDs != 'None Selected':
gsp = UI.GeneSelectionParameters(self.species,self.platform,vendor)
if rho!=None:
try:
gsp.setRhoCutoff(rho)
GeneSelection = 'amplify '+GeneSelection
except Exception: print 'Must enter a valid Pearson correlation cutoff (float)',traceback.format_exc()
gsp.setGeneSet(GeneSetSelection)
gsp.setPathwaySelect(PathwaySelection)
gsp.setGeneSelection(GeneSelection)
gsp.setOntologyID(OntologyID)
gsp.setTranspose(transpose)
gsp.setNormalize(normalization)
gsp.setJustShowTheseIDs(justShowTheseIDs)
gsp.setClusterGOElite(clusterGOElite)
transpose = gsp ### this allows methods that don't transmit this object to also work
if row_method == 'no': row_method = None
if column_method == 'no': column_method = None
#print [GeneSetSelection, PathwaySelection,OntologyID]
remoteCallToAltAnalyze = False
#try: print [gsp.ClusterGOElite()]
#except Exception: print 'dog', traceback.format_exc()
except Exception:
print traceback.format_exc()
if remoteCallToAltAnalyze == False:
try: UI.createHeatMap(input_file_dir, row_method, row_metric, column_method, column_metric, color_gradient, transpose, contrast, None, display=True)
except Exception: print traceback.format_exc()
else:
try:
command = ['--image', 'hierarchical','--species', self.species,'--platform',self.platform,'--input',input_file_dir, '--display', 'True']
command += ['--column_method',str(column_method),'--column_metric',column_metric]
command += ['--row_method',str(row_method),'--row_metric',row_metric]
command += ['--normalization',normalization,'--transpose',str(transpose),'--contrast',contrast,'--color_gradient',color_gradient]
#print command
command_str = string.join(['']+command,' ')
#print command
package_path = unique.filepath('python')
mac_package_path = string.replace(package_path,'python','AltAnalyze.app/Contents/MacOS/AltAnalyze')
#os.system(mac_package_path+command_str);sys.exit()
import subprocess
#subprocess.call([mac_package_path, 'C:\\test.txt'])
usePopen = True
if os.name == 'nt':
command = [mac_package_path]+command
DETACHED_PROCESS = 0x00000008
pid = subprocess.Popen(command, creationflags=DETACHED_PROCESS).pid
else:
command = [mac_package_path]+command
if usePopen:
alt_command = ["start"]+command
alt_command = ["start",mac_package_path]
subprocess.call(command) #works but runs in the background of the application, detached
if usePopen==False:
### same issue as subprocess.Popen
pid = os.fork()
if pid ==0:
os.execv(mac_package_path,command) ### Kills the parent app
os._exit(0)
"""
retcode = subprocess.call([
apt_file, "-d", cdf_file, "--kill-list", kill_list_dir, "-a", algorithm, "-o", output_dir,
"--cel-files", cel_dir, "-a", "pm-mm,mas5-detect.calls=1.pairs=1"])"""
except Exception:
print traceback.format_exc()
else:
os.chdir(parentDirectory)
RegExMatch = re.findall("exp.", self.DirFile)
if(len(RegExMatch) == 0):
InputFile = self.DirFile.replace("-3D", "")
InputFile = InputFile.replace("-PCA", "")
InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionOutput/Clustering/")
input_file_dir= InputFile + ".txt"
else:
InputFile = self.DirFile.replace("-3D", "")
InputFile = InputFile.replace("-PCA", "")
InputFile = InputFile.replace("DataPlots/Clustering-", "ExpressionInput/")
input_file_dir= InputFile + ".txt"
if(self.IncludeLabelsRadio.GetValue() == True):
include_labels= 'yes'
else:
include_labels= 'no'
pca_algorithm = 'SVD'
transpose = False
if self.runPCA == False:
include_labels = 'no'
if(self.D_3DRadio.GetValue() == True):
plotType = '3D'
else:
plotType = '2D'
display = True
self.runPCA = True
count,columns = self.verifyFileLength(input_file_dir)
if columns == 3: plotType = '2D' ### only 2 components possible for 2 samples
if count>0:
UI.performPCA(input_file_dir, include_labels, pca_algorithm, transpose, None, plotType=plotType, display=display)
else:
self.control.write('PCA input file not present: '+input_file_dir+'\n')
os.chdir(currentDirectory)
self.InteractivePanelUpdate(event)
def verifyFileLength(self,filename):
count = 0; columns=0
try:
fn=unique.filepath(filename)
for line in open(fn,'rU').xreadlines():
t = string.split(line,'\t')
columns = len(t)
count+=1
if count>9: break
except Exception: null=[]
return count,columns
def OnAbout(self, event):
#Brings up the developer information. Non-functional currently but will be updated eventually.
dial = wx.MessageDialog(None, 'AltAnalyze Results Viewer\nVersion 0.5\n2015', 'About', wx.OK)
dial.ShowModal()
def OnHelp(self, event):
#Brings up the tutorial and documentation. Will be updated to a .pdf in the future.
os.chdir(parentDirectory)
ManualPath = rootDirectory + "/Documentation/ViewerManual.pdf"
subprocess.Popen(['open', ManualPath])
os.chdir(currentDirectory)
class ImageFrame(wx.Frame):
#Obsolete code, will be removed almost certainly.
title = "Image"
def __init__(self):
wx.Frame.__init__(self, None, title=self.title)
def remoteViewer(app):
fr = Main(parent=None,id=1)
fr.Show()
app.MainLoop()
if __name__ == "__main__":
app = wx.App(False)
fr = Main(parent=None,id=1)
fr.Show()
app.MainLoop()
| apache-2.0 |
hgrif/ds-utils | dsutils/sklearn.py | 1 | 2913 | import numpy as np
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
def multiclass_roc_auc_score(y_true, y_score, label_binarizer=None, **kwargs):
"""Compute ROC AUC score for multiclass.
:param y_true: true multiclass labels [n_samples]
:param y_score: multiclass scores [n_samples, n_classes]
:param label_binarizer: Binarizer to use (sklearn.preprocessing.LabelBinarizer())
:param kwargs: Additional keyword arguments for sklearn.metrics.roc_auc_score
:return: Multiclass ROC AUC score
"""
if label_binarizer is None:
label_binarizer = preprocessing.LabelBinarizer()
binarized_true = label_binarizer.fit_transform(y_true)
score = metrics.roc_auc_score(binarized_true, y_score, **kwargs)
return score
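# Illustrative usage sketch (not part of the original module), assuming a toy 3-class
# problem; the sample counts and the 'macro' averaging choice are assumptions made only
# for this example. Extra keywords are forwarded to sklearn.metrics.roc_auc_score.
def _example_multiclass_roc_auc():
    rng = np.random.RandomState(0)
    y_true = np.repeat([0, 1, 2], 7)               # 21 samples, all 3 classes present
    y_score = rng.dirichlet(np.ones(3), size=21)   # per-class scores, rows sum to 1
    return multiclass_roc_auc_score(y_true, y_score, average='macro')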
def split_train_test(y, do_split_stratified=True, **kwargs):
"""Get indexes to split y in train and test sets.
:param y: Labels of samples
:param do_split_stratified: Use StratifiedShuffleSplit (else ShuffleSplit)
:param kwargs: Keyword arguments for StratifiedShuffleSplit or ShuffleSplit
:return: (train indexes, test indexes)
"""
if do_split_stratified:
data_splitter = cross_validation.StratifiedShuffleSplit(y, n_iter=1,
**kwargs)
else:
data_splitter = cross_validation.ShuffleSplit(y, n_iter=1, **kwargs)
train_ix, test_ix = data_splitter.__iter__().next()
return train_ix, test_ix
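# Illustrative usage sketch (not part of the original module): a stratified 75/25 split
# of a small label vector. test_size and random_state are assumptions for this example
# and are passed straight through to StratifiedShuffleSplit via **kwargs.
def _example_split_train_test():
    y = np.array([0, 0, 0, 0, 1, 1, 1, 1])
    train_ix, test_ix = split_train_test(y, do_split_stratified=True,
                                         test_size=0.25, random_state=0)
    return train_ix, test_ix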
class OrderedLabelEncoder(preprocessing.LabelEncoder):
"""Encode labels with value between 0 and n_classes-1 in specified order.
See also
--------
sklearn.preprocessing.LabelEncoder
"""
def __init__(self, classes):
self.classes_ = np.array(classes, dtype='O')
def fit(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def fit_transform(self, y):
""" Deprecated method.
"""
raise Exception('Invalid method: method is deprecated')
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
self._check_fitted()
classes = np.array(np.unique(y), dtype='O')
preprocessing.label._check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
transformed_y = np.zeros_like(y, dtype=int)
for i_class, current_class in enumerate(self.classes_):
transformed_y[np.array(y) == current_class] = i_class
return transformed_y | mit |
amnet04/ALECMAPREADER1 | funcionesCV_recurrentes.py | 1 | 4438 | import numpy as np
import pandas
import cv2
def cargar_imagen(archivo):
'''
Loads the image into two matrices, one grayscale and one in color,
and returns a dictionary with both versions.
'''
imagen = {}
imagen['gris'] = cv2.imread(archivo,0)
imagen['color'] = cv2.imread(archivo)
return(imagen)
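# Illustrative usage sketch (not part of the original module): load an image and display
# its grayscale version. The file name 'mapa.jpg' is a placeholder assumption.
def _ejemplo_cargar_imagen():
    imagen = cargar_imagen('mapa.jpg')
    ver_imagen(imagen['gris'], 'grayscale view')
    return imagen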
def dilatar_imagen(img, umbral_blanco, umbral_negro, dim_kernel, iteraciones):
ret,thresh = cv2.threshold(img, umbral_blanco,umbral_negro,cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, dim_kernel)
dilatada= cv2.dilate(thresh,kernel,iterations = iteraciones)
return(dilatada)
def erosionar_imagen(img, umbral_blanco, umbral_negro, dim_kernel, iteraciones):
ret,thresh = cv2.threshold(img, umbral_blanco,umbral_negro,cv2.THRESH_BINARY_INV)
kernel = cv2.getStructuringElement(cv2.MORPH_RECT, dim_kernel)
erosionada = cv2.erode(thresh,kernel,iterations = iteraciones)
return(erosionada)
def dibujar_rectangulos(img,x1,y1,x2,y2,color,ancho_bordes,archivo=''):
cv2.rectangle(img,(x1,y1),(x2,y2),(color),ancho_bordes)
# if archivo !='':
# cv2.imwrite(archivo,img)
def cortar_imagen(img,x1,x2,y1,y2):
corte = img[y1:y2,x1:x2]
img_cortada = {}
img_cortada['im'] = corte
img_cortada['x1'] = x1
img_cortada['y1'] = y1
img_cortada['x2'] = x2
img_cortada['y2'] = y2
return(img_cortada)
def bw_otsu(img, umbral_blanco,limite,blur=0,blur_ori =0):
'''
blur is the blur kernel shape as a tuple, e.g. (5,5);
blur_ori is an integer. If no values are given, no blur is applied.
'''
if blur == (0,0):
blureada = img
else:
blureada = cv2.GaussianBlur(img,blur,blur_ori)
ret,th = cv2.threshold(blureada,umbral_blanco,limite,cv2.THRESH_BINARY+cv2.THRESH_OTSU)
return (th)
def bw_adapta(img,limite,tam,sh):
th = cv2.adaptiveThreshold(img,limite,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,tam,sh)
return (th)
def ver_imagen(img,title='solo pa vé'):
cv2.imshow(title, img)
cv2.waitKey()
cv2.destroyAllWindows()
def detectar(template, imagen, max_var_thresh):
'''
Detects whether the image has matches in the map and returns the
top-left coordinate of the match, plus its height and width
in the overall map image.
'''
imagen_gris = cv2.cvtColor(imagen, cv2.COLOR_RGB2GRAY)
imagen_bw = bw_adapta(imagen_gris, 255, 71, 30)
h, w = template.shape
coincidencia = cv2.matchTemplate(template, imagen_bw, cv2.TM_CCOEFF)
min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(coincidencia)
x1 = max_loc[0]
x2 = max_loc[0] + w
y1 = max_loc[1]
y2 = max_loc[1] + h
if max_val < max_var_thresh:
#cv2.imwrite('Pruebas/tast.jpg',imagen[y1:y2,x1:x2])
return(None, max_val)
else:
#print (max_val)
sup_izq = (x1,y1)
inf_der = (x2,y2)
roi = imagen[y1:y2,x1:x2]
return(sup_izq, inf_der, roi)
def detectar_recursivo(template, imagen, thresh):
imagen_gris = cv2.cvtColor(imagen, cv2.COLOR_RGB2GRAY)
imagen_bw = bw_adapta(imagen_gris, 255, 71, 30)
h, w = template.shape
res = cv2.matchTemplate(imagen_bw,template,cv2.TM_CCOEFF_NORMED)
loc = np.where(res>=thresh)
puntos = []
for punto in zip(*loc[::-1]):
puntos.append(punto)
return (puntos, h, w)
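# Illustrative usage sketch (not part of the original module): find every match of a
# template with detectar_recursivo and outline each one with dibujar_rectangulos.
# The file names and the 0.7 threshold are assumptions made only for this example.
def _ejemplo_marcar_coincidencias():
    template = cv2.imread('template.png', 0)      # grayscale template
    mapa = cargar_imagen('mapa.jpg')
    puntos, h, w = detectar_recursivo(template, mapa['color'], 0.7)
    for (x, y) in puntos:
        dibujar_rectangulos(mapa['color'], x, y, x + w, y + h, (0, 0, 255), 2)
    return mapa['color']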
def detectar_area_contornos(imagen,
umbral_blanco,
umbral_negro,
dim_kernel,
iteraciones,
w, h):
if dim_kernel != (0,0):
imagen_dilatada = dilatar_imagen(imagen,
umbral_blanco,
umbral_negro,
dim_kernel,
iteraciones)
else:
imagen_dilatada = imagen
imagen, contours, hierarchy = cv2.findContours(imagen_dilatada,
cv2.RETR_TREE,
cv2.CHAIN_APPROX_SIMPLE)
areas = []
for contour in contours:
[x,y,wc,hc] = cv2.boundingRect(contour)
x1 = x
y1 = y
x2 = x+wc
y2 = y+hc
if (wc > w) and (hc > h):
areas.append((x1, y1 , x2, y2))
return (areas)
| mit |
schreiberx/sweet | benchmarks_plane/nonlinear_interaction/pp_plot_errors_single.py | 2 | 2935 | #! /usr/bin/env python3
import sys
import matplotlib
matplotlib.use('Agg')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from matplotlib.lines import Line2D
from mule.postprocessing.JobsData import *
from mule.postprocessing.JobsDataConsolidate import *
if len(sys.argv) > 1:
muletag = sys.argv[1]
output_filename = sys.argv[2]
else:
print("")
print("Usage:")
print("")
print(" "+sys.argv[0]+" [jobdata mule tag for y axis] [output_filename.pdf] [jobdir1] [jobdir2] ... [jobdirN]")
print("")
sys.exit(1)
if len(sys.argv) > 3:
# Load Jobs specified via program parameters
jd = JobsData(job_dirs=sys.argv[3:])
else:
# Load all Jobs
jd = JobsData()
# Consolidate data...
jdc = JobsDataConsolidate(jd)
# ... which belongs to the same time integration method
jdc_groups = jdc.create_groups(['runtime.timestepping_method'])
#
# Filter to exclude data which indicates instabilities
#
def data_filter(x, y, jd):
if y == None:
return True
if 'runtime.max_simulation_time' in jd:
if jd['runtime.max_simulation_time'] <= 24*60*60:
if y > 100:
return True
elif jd['runtime.max_simulation_time'] <= 10*24*60*60:
if y > 1000:
return True
return False
# Extract data suitable for plotting
jdc_groups_data = JobsData_GroupsPlottingScattered(
jdc_groups,
'runtime.timestep_size',
muletag,
data_filter=data_filter
)
data = jdc_groups_data.get_data()
def label(d):
val = d['runtime.timestepping_method'].replace('_', '\\_')+', $\Delta t = '+str(d['runtime.timestep_size'])+'$'
return val
##########################################################
# Plotting starts here
##########################################################
print("*"*80)
print("*"*80)
print("*"*80)
fontsize=18
figsize=(10, 10)
fig, ax = plt.subplots(figsize=(10,10))
#plt.rc('text', usetex=True)
ax.set_xscale("log", nonposx='clip')
ax.set_yscale("log", nonposy='clip')
colors = ['b', 'g', 'r', 'c', 'm', 'y', 'k']
markers = []
for m in Line2D.markers:
try:
if len(m) == 1 and m != ' ' and m != '':
markers.append(m)
except TypeError:
pass
linestyles = ['-', '--', ':', '-.']
c = 0
title = ''
for key, d in data.items():
x = d['x_values']
y = d['y_values']
l = key.replace('_', '\\_')
print(" + "+l)
print(x)
print(y)
ax.plot(x, y, marker=markers[c % len(markers)], linestyle=linestyles[c % len(linestyles)], label=l)
c = c + 1
if title != '':
plt.title(title, fontsize=fontsize)
plt.xlabel("Timestep size $\Delta t$ (sec)", fontsize=fontsize)
#
# Name of data
#
dataname = "TODO"
if 'prog_h' in muletag:
dataname = "surface height $h$"
#
# Norm
#
if 'linf' in muletag:
norm = "$L_\infty$"
else:
norm = "$L_{TODO}$"
plt.ylabel(norm+" error on "+dataname, fontsize=fontsize)
plt.legend(fontsize=15)
plt.savefig(output_filename, transparent=True, bbox_inches='tight', pad_inches=0)
plt.close()
| mit |
ChanChiChoi/scikit-learn | sklearn/cluster/tests/test_birch.py | 342 | 5603 | """
Tests for the birch clustering algorithm.
"""
from scipy import sparse
import numpy as np
from sklearn.cluster.tests.common import generate_clustered_data
from sklearn.cluster.birch import Birch
from sklearn.cluster.hierarchical import AgglomerativeClustering
from sklearn.datasets import make_blobs
from sklearn.linear_model import ElasticNet
from sklearn.metrics import pairwise_distances_argmin, v_measure_score
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
def test_n_samples_leaves_roots():
# Sanity check for the number of samples in leaves and roots
X, y = make_blobs(n_samples=10)
brc = Birch()
brc.fit(X)
n_samples_root = sum([sc.n_samples_ for sc in brc.root_.subclusters_])
n_samples_leaves = sum([sc.n_samples_ for leaf in brc._get_leaves()
for sc in leaf.subclusters_])
assert_equal(n_samples_leaves, X.shape[0])
assert_equal(n_samples_root, X.shape[0])
def test_partial_fit():
# Test that fit is equivalent to calling partial_fit multiple times
X, y = make_blobs(n_samples=100)
brc = Birch(n_clusters=3)
brc.fit(X)
brc_partial = Birch(n_clusters=None)
brc_partial.partial_fit(X[:50])
brc_partial.partial_fit(X[50:])
assert_array_equal(brc_partial.subcluster_centers_,
brc.subcluster_centers_)
# Test that same global labels are obtained after calling partial_fit
# with None
brc_partial.set_params(n_clusters=3)
brc_partial.partial_fit(None)
assert_array_equal(brc_partial.subcluster_labels_, brc.subcluster_labels_)
def test_birch_predict():
# Test the predict method predicts the nearest centroid.
rng = np.random.RandomState(0)
X = generate_clustered_data(n_clusters=3, n_features=3,
n_samples_per_cluster=10)
# n_samples * n_samples_per_cluster
shuffle_indices = np.arange(30)
rng.shuffle(shuffle_indices)
X_shuffle = X[shuffle_indices, :]
brc = Birch(n_clusters=4, threshold=1.)
brc.fit(X_shuffle)
centroids = brc.subcluster_centers_
assert_array_equal(brc.labels_, brc.predict(X_shuffle))
nearest_centroid = pairwise_distances_argmin(X_shuffle, centroids)
assert_almost_equal(v_measure_score(nearest_centroid, brc.labels_), 1.0)
def test_n_clusters():
# Test that n_clusters param works properly
X, y = make_blobs(n_samples=100, centers=10)
brc1 = Birch(n_clusters=10)
brc1.fit(X)
assert_greater(len(brc1.subcluster_centers_), 10)
assert_equal(len(np.unique(brc1.labels_)), 10)
# Test that n_clusters = Agglomerative Clustering gives
# the same results.
gc = AgglomerativeClustering(n_clusters=10)
brc2 = Birch(n_clusters=gc)
brc2.fit(X)
assert_array_equal(brc1.subcluster_labels_, brc2.subcluster_labels_)
assert_array_equal(brc1.labels_, brc2.labels_)
# Test that the wrong global clustering step raises an Error.
clf = ElasticNet()
brc3 = Birch(n_clusters=clf)
assert_raises(ValueError, brc3.fit, X)
# Test that a small number of clusters raises a warning.
brc4 = Birch(threshold=10000.)
assert_warns(UserWarning, brc4.fit, X)
def test_sparse_X():
# Test that sparse and dense data give same results
X, y = make_blobs(n_samples=100, centers=10)
brc = Birch(n_clusters=10)
brc.fit(X)
csr = sparse.csr_matrix(X)
brc_sparse = Birch(n_clusters=10)
brc_sparse.fit(csr)
assert_array_equal(brc.labels_, brc_sparse.labels_)
assert_array_equal(brc.subcluster_centers_,
brc_sparse.subcluster_centers_)
def check_branching_factor(node, branching_factor):
subclusters = node.subclusters_
assert_greater_equal(branching_factor, len(subclusters))
for cluster in subclusters:
if cluster.child_:
check_branching_factor(cluster.child_, branching_factor)
def test_branching_factor():
# Test that nodes have at max branching_factor number of subclusters
X, y = make_blobs()
branching_factor = 9
# Purposefully set a low threshold to maximize the subclusters.
brc = Birch(n_clusters=None, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
brc = Birch(n_clusters=3, branching_factor=branching_factor,
threshold=0.01)
brc.fit(X)
check_branching_factor(brc.root_, branching_factor)
# Raises error when branching_factor is set to one.
brc = Birch(n_clusters=None, branching_factor=1, threshold=0.01)
assert_raises(ValueError, brc.fit, X)
def check_threshold(birch_instance, threshold):
"""Use the leaf linked list for traversal"""
current_leaf = birch_instance.dummy_leaf_.next_leaf_
while current_leaf:
subclusters = current_leaf.subclusters_
for sc in subclusters:
assert_greater_equal(threshold, sc.radius)
current_leaf = current_leaf.next_leaf_
def test_threshold():
# Test that the leaf subclusters have a threshold lesser than radius
X, y = make_blobs(n_samples=80, centers=4)
brc = Birch(threshold=0.5, n_clusters=None)
brc.fit(X)
check_threshold(brc, 0.5)
brc = Birch(threshold=5.0, n_clusters=None)
brc.fit(X)
check_threshold(brc, 5.)
| bsd-3-clause |
appapantula/scikit-learn | sklearn/neighbors/graph.py | 208 | 7031 | """Nearest Neighbors graph functions"""
# Author: Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from .base import KNeighborsMixin, RadiusNeighborsMixin
from .unsupervised import NearestNeighbors
def _check_params(X, metric, p, metric_params):
"""Check the validity of the input parameters"""
params = zip(['metric', 'p', 'metric_params'],
[metric, p, metric_params])
est_params = X.get_params()
for param_name, func_param in params:
if func_param != est_params[param_name]:
raise ValueError(
"Got %s for %s, while the estimator has %s for "
"the same parameter." % (
func_param, param_name, est_params[param_name]))
def _query_include_self(X, include_self, mode):
"""Return the query based on include_self param"""
# Done to preserve backward compatibility.
if include_self is None:
if mode == "connectivity":
warnings.warn(
"The behavior of 'kneighbors_graph' when mode='connectivity' "
"will change in version 0.18. Presently, the nearest neighbor "
"of each sample is the sample itself. Beginning in version "
"0.18, the default behavior will be to exclude each sample "
"from being its own nearest neighbor. To maintain the current "
"behavior, set include_self=True.", DeprecationWarning)
include_self = True
else:
include_self = False
if include_self:
query = X._fit_X
else:
query = None
return query
def kneighbors_graph(X, n_neighbors, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of k-Neighbors for points in X
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
n_neighbors : int
Number of neighbors for each sample.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the k-Neighbors for each sample
point. The DistanceMetric class gives a list of available metrics.
The default distance is 'euclidean' ('minkowski' metric with the p
param equal to 2.)
include_self: bool, default backward-compatible.
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import kneighbors_graph
>>> A = kneighbors_graph(X, 2)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
radius_neighbors_graph
"""
if not isinstance(X, KNeighborsMixin):
X = NearestNeighbors(n_neighbors, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.kneighbors_graph(X=query, n_neighbors=n_neighbors, mode=mode)
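# Illustrative sketch (not part of the scikit-learn API): the same helper with
# mode='distance', which stores edge lengths instead of 0/1 connectivity, and
# include_self=False so each sample is left out of its own neighborhood. The toy
# data is an assumption made only for this example.
def _example_kneighbors_distance_graph():
    X = [[0], [3], [1]]
    D = kneighbors_graph(X, 2, mode='distance', include_self=False)
    return D.toarray()   # dense [n_samples, n_samples] array of edge distances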
def radius_neighbors_graph(X, radius, mode='connectivity', metric='minkowski',
p=2, metric_params=None, include_self=None):
"""Computes the (weighted) graph of Neighbors for points in X
Neighborhoods are restricted the points at a distance lower than
radius.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
X : array-like or BallTree, shape = [n_samples, n_features]
Sample data, in the form of a numpy array or a precomputed
:class:`BallTree`.
radius : float
Radius of neighborhoods.
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
metric : string, default 'minkowski'
The distance metric used to calculate the neighbors within a
given radius for each sample point. The DistanceMetric class
gives a list of available metrics. The default distance is
'euclidean' ('minkowski' metric with the param equal to 2.)
include_self: bool, default None
Whether or not to mark each sample as the first nearest neighbor to
itself. If `None`, then True is used for mode='connectivity' and False
for mode='distance' as this will preserve backwards compatibility. From
version 0.18, the default value will be False, irrespective of the
value of `mode`.
p : int, default 2
Power parameter for the Minkowski metric. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params: dict, optional
additional keyword arguments for the metric function.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import radius_neighbors_graph
>>> A = radius_neighbors_graph(X, 1.5)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if not isinstance(X, RadiusNeighborsMixin):
X = NearestNeighbors(radius=radius, metric=metric, p=p,
metric_params=metric_params).fit(X)
else:
_check_params(X, metric, p, metric_params)
query = _query_include_self(X, include_self, mode)
return X.radius_neighbors_graph(query, radius, mode)
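# Illustrative sketch (not part of the scikit-learn API): reuse an already fitted
# NearestNeighbors estimator so radius_neighbors_graph skips the internal refit.
# The radius of 1.5 is an assumption made only for this example.
def _example_radius_distance_graph():
    X = [[0], [3], [1]]
    nn = NearestNeighbors(radius=1.5).fit(X)
    return radius_neighbors_graph(nn, 1.5, mode='distance',
                                  include_self=False).toarray()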
| bsd-3-clause |
nuclear-wizard/moose | python/mooseutils/VectorPostprocessorReader.py | 6 | 5970 | #* This file is part of the MOOSE framework
#* https://www.mooseframework.org
#*
#* All rights reserved, see COPYRIGHT for full restrictions
#* https://github.com/idaholab/moose/blob/master/COPYRIGHT
#*
#* Licensed under LGPL 2.1, please see LICENSE for details
#* https://www.gnu.org/licenses/lgpl-2.1.html
import os
import glob
import pandas
import bisect
from .MooseDataFrame import MooseDataFrame
from . import message
class VectorPostprocessorReader(object):
"""
A Reader for MOOSE VectorPostprocessor data.
Args:
pattern[str]: A pattern of files (for use with glob) for loading.
MOOSE outputs VectorPostprocessor data in separate files for each timestep, using the timestep as
a suffix. For example: file_000.csv, file_001.csv, etc.
Therefore, a pattern acceptable for use with the python glob package must be supplied. For the
above files, "file_*.csv" should be supplied.
This object manages the loading and unloading of data and should always be in a valid state,
regardless of the existence of a file. It will also append new data and remove old/deleted data
on subsequent calls to "update()".
"""
def __init__(self, pattern, run_start_time=0):
self._pattern = pattern
self._timedata = MooseDataFrame(self._pattern.replace('*', 'time'),
run_start_time=None,
index='timestep')
self._frames = dict()
self._time = -1
self._index = None
self._run_start_time = run_start_time
self.update()
@property
def data(self):
return self._frames.get(self._index, pandas.DataFrame())
@property
def filename(self):
if self._frames:
return self._frames[self._index].filename
def __getitem__(self, keys):
"""
Operator[] returns the data for the current time.
Args:
keys[str|list]: The key(s) to return.
"""
return self._frames[self._index][keys]
def __bool__(self):
"""
Allows this object to be used in boolean cases.
Example:
data = VectorPostprocessorReader('files_*.csv')
if not data:
print 'No data found!'
"""
return self._index in self._frames
def __contains__(self, variable):
"""
Returns true if the variable exists in the data structure.
"""
return variable in self._frames[self._index]
def times(self):
"""
Returns the list of available time indices contained in the data.
"""
return sorted(self._frames.keys())
def clear(self):
"""
Remove all data.
"""
self._frames = dict()
self._index = None
self._time = None
def variables(self):
"""
Return a list of postprocessor variable names listed in the reader.
"""
if self._index is not None:
return self._frames[self._index].data.columns.tolist()
def update(self, time=None):
"""
Update data by adding/removing files.
time[float]: The time at which the data should be returned.
"""
# Update the time
if time is not None:
self._time = time
# Update the time data file
self._timedata.update()
# The list of files from the supplied pattern
last_modified = 0.0
self._frames = dict()
for fname in sorted(glob.glob(self._pattern)):
if fname.endswith('LATEST') or fname.endswith('FINAL') or (fname == self._timedata.filename):
continue
idx = self._timeHelper(fname)
mdf = self._frames.get(idx, None)
if mdf is None:
mdf = MooseDataFrame(fname, run_start_time=self._run_start_time, update=False,
peacock_index=True)
self._frames[idx] = mdf
if (mdf.modified < last_modified):
self._frames.pop(idx)
elif mdf.filesize == 0:
self._frames.pop(idx)
else:
last_modified = mdf.modified
# Clear the data if empty
if self._frames:
self.__updateCurrentIndex()
df = self._frames.get(self._index, None)
if df is not None:
return df.update()
def repr(self):
"""
Return components for building script.
Returns:
(output, imports) The necessary script and include statements to re-create data load.
"""
imports = ['import mooseutils']
output = ['\n# Read VectorPostprocessor Data']
output += ['data = mooseutils.VectorPostprocessorReader({})'.format(repr(self._pattern))]
return output, imports
def _timeHelper(self, filename):
"""
Determine the time index. (protected)
"""
idx = filename.rfind('_') + 1
tstep = int(filename[idx:-4])
if not self._timedata:
return tstep
else:
try:
return self._timedata['time'].loc[tstep]
except Exception:
return tstep
def __updateCurrentIndex(self):
"""
Helper for setting the current key for the supplied time.
"""
if not self._frames:
index = None
# Return the latest time
elif self._time == -1:
index = self.times()[-1]
# Return the specified time
elif self._time in self._frames:
index = self._time
# Find nearest time
else:
times = self.times()
n = len(times)
idx = bisect.bisect_right(times, self._time) - 1
if idx < 0:
idx = 0
elif idx > n:
idx = -1
index = times[idx]
self._index = index
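# Illustrative usage sketch (not part of the original module): typical use of the reader
# on a set of VectorPostprocessor CSV files. The 'vpp_*.csv' pattern and the variable
# name 'u' are assumptions made only for this example.
def _example_reader_usage():
    reader = VectorPostprocessorReader('vpp_*.csv')
    reader.update(time=1.0)            # select the frame nearest to t = 1.0
    if reader and 'u' in reader:
        return reader['u']             # pandas data for variable 'u' at that time
    return None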
| lgpl-2.1 |
jlowin/airflow | scripts/perf/scheduler_ops_metrics.py | 30 | 6536 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from datetime import datetime
import logging
import pandas as pd
import sys
from airflow import configuration, settings
from airflow.jobs import SchedulerJob
from airflow.models import DagBag, DagModel, DagRun, TaskInstance
from airflow.utils.state import State
SUBDIR = 'scripts/perf/dags'
DAG_IDS = ['perf_dag_1', 'perf_dag_2']
MAX_RUNTIME_SECS = 6
class SchedulerMetricsJob(SchedulerJob):
"""
This class extends SchedulerJob to instrument the execution performance of
task instances contained in each DAG. We want to know if any DAG
is starved of resources, and this will be reflected in the stats printed
out at the end of the test run. The following metrics will be instrumented
for each task instance (dag_id, task_id, execution_date) tuple:
    1. Queuing delay - time from the start of the executor until the task
       instance is added to the executor queue.
    2. Start delay - time from the start of the executor until the task
       instance starts executing.
    3. Land time - time from the start of the executor until the task
       instance completes.
    4. Duration - wall-clock time taken to execute the task instance.
The DAGs implement bash operators that call the system wait command. This
is representative of typical operators run on Airflow - queries that are
run on remote systems and spend the majority of their time on I/O wait.
To Run:
$ python scripts/perf/scheduler_ops_metrics.py
"""
__mapper_args__ = {
'polymorphic_identity': 'SchedulerMetricsJob'
}
def print_stats(self):
"""
Print operational metrics for the scheduler test.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
successful_tis = filter(lambda x: x.state == State.SUCCESS, tis)
ti_perf = [(ti.dag_id, ti.task_id, ti.execution_date,
(ti.queued_dttm - self.start_date).total_seconds(),
(ti.start_date - self.start_date).total_seconds(),
(ti.end_date - self.start_date).total_seconds(),
ti.duration) for ti in successful_tis]
ti_perf_df = pd.DataFrame(ti_perf, columns=['dag_id', 'task_id',
'execution_date',
'queue_delay',
'start_delay', 'land_time',
'duration'])
print('Performance Results')
print('###################')
for dag_id in DAG_IDS:
print('DAG {}'.format(dag_id))
print(ti_perf_df[ti_perf_df['dag_id'] == dag_id])
print('###################')
if len(tis) > len(successful_tis):
print("WARNING!! The following task instances haven't completed")
print(pd.DataFrame([(ti.dag_id, ti.task_id, ti.execution_date, ti.state)
for ti in filter(lambda x: x.state != State.SUCCESS, tis)],
columns=['dag_id', 'task_id', 'execution_date', 'state']))
session.commit()
def heartbeat(self):
"""
Override the scheduler heartbeat to determine when the test is complete
"""
super(SchedulerMetricsJob, self).heartbeat()
session = settings.Session()
# Get all the relevant task instances
TI = TaskInstance
successful_tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.filter(TI.state.in_([State.SUCCESS]))
.all()
)
session.commit()
dagbag = DagBag(SUBDIR)
dags = [dagbag.dags[dag_id] for dag_id in DAG_IDS]
        # the tasks in perf_dag_1 and perf_dag_2 have a daily schedule interval.
num_task_instances = sum([(datetime.today() - task.start_date).days
for dag in dags for task in dag.tasks])
if (len(successful_tis) == num_task_instances or
(datetime.now()-self.start_date).total_seconds() >
MAX_RUNTIME_SECS):
if (len(successful_tis) == num_task_instances):
self.logger.info("All tasks processed! Printing stats.")
else:
self.logger.info("Test timeout reached. "
"Printing available stats.")
self.print_stats()
set_dags_paused_state(True)
sys.exit()
def clear_dag_runs():
"""
Remove any existing DAG runs for the perf test DAGs.
"""
session = settings.Session()
drs = session.query(DagRun).filter(
DagRun.dag_id.in_(DAG_IDS),
).all()
for dr in drs:
logging.info('Deleting DagRun :: {}'.format(dr))
session.delete(dr)
def clear_dag_task_instances():
"""
Remove any existing task instances for the perf test DAGs.
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
def set_dags_paused_state(is_paused):
"""
Toggle the pause state of the DAGs in the test.
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
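# Hedged aside (not part of the original script): a minimal pandas-only sketch of how
# the per-task metrics documented in SchedulerMetricsJob (queue delay, start delay,
# land time, duration) can be derived from raw timestamps; the column names
# 'queued', 'started' and 'ended' are assumptions made only for illustration.
def _metrics_from_timestamps_sketch(task_times, scheduler_start):
    """task_times: DataFrame with datetime columns 'queued', 'started', 'ended'."""
    df = task_times.copy()
    df['queue_delay'] = (df['queued'] - scheduler_start).dt.total_seconds()
    df['start_delay'] = (df['started'] - scheduler_start).dt.total_seconds()
    df['land_time'] = (df['ended'] - scheduler_start).dt.total_seconds()
    df['duration'] = (df['ended'] - df['started']).dt.total_seconds()
    return df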
def main():
configuration.load_test_config()
set_dags_paused_state(False)
clear_dag_runs()
clear_dag_task_instances()
job = SchedulerMetricsJob(dag_ids=DAG_IDS, subdir=SUBDIR)
job.run()
if __name__ == "__main__":
main()
| apache-2.0 |
hasecbinusr/pysal | pysal/esda/tests/test_geary.py | 5 | 2997 | """Geary Unittest."""
import unittest
from ... import open as popen
from ... import examples
from .. import geary
import numpy as np
from ...common import pandas
PANDAS_EXTINCT = pandas is None
class Geary_Tester(unittest.TestCase):
"""Geary class for unit tests."""
def setUp(self):
self.w = popen(examples.get_path("book.gal")).read()
f = popen(examples.get_path("book.txt"))
self.y = np.array(f.by_col['y'])
def test_Geary(self):
c = geary.Geary(self.y, self.w, permutations=0)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(c.C, 0.33301083591331254)
self.assertAlmostEquals(c.EC, 1.0)
self.assertAlmostEquals(c.VC_norm, 0.031805300245097874)
self.assertAlmostEquals(c.p_norm, 9.2018240680169505e-05)
self.assertAlmostEquals(c.z_norm, -3.7399778367629564)
self.assertAlmostEquals(c.seC_norm, 0.17834040553138225)
self.assertAlmostEquals(c.VC_rand, 0.018437747611029367)
self.assertAlmostEquals(c.p_rand, 4.5059156794646782e-07)
self.assertAlmostEquals(c.z_rand, -4.9120733751216008)
self.assertAlmostEquals(c.seC_rand, 0.13578566791465646)
self.assertAlmostEquals(c.EC_sim, 0.9980676303238214)
self.assertAlmostEquals(c.VC_sim, 0.034430408799858946)
self.assertAlmostEquals(c.p_sim, 0.001)
self.assertAlmostEquals(c.p_z_sim, 0.00016908100514811952)
self.assertAlmostEquals(c.z_sim, -3.5841621159171746)
self.assertAlmostEquals(c.seC_sim, 0.18555432843202269)
@unittest.skipIf(PANDAS_EXTINCT, 'missing pandas')
def test_by_col(self):
import pandas as pd
df = pd.DataFrame(self.y, columns=['y'])
r1 = geary.Geary.by_col(df, ['y'], w=self.w, permutations=999)
this_geary = np.unique(r1.y_geary.values)
this_pval = np.unique(r1.y_p_sim.values)
np.random.seed(12345)
c = geary.Geary(self.y, self.w, permutations=999)
self.assertAlmostEquals(this_geary, c.C)
self.assertAlmostEquals(this_pval, c.p_sim)
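# Hedged aside (not part of the original test module): a direct NumPy transcription of
# the Geary's C statistic these tests exercise, written for a dense weights matrix W;
# the helper name and the dense-W assumption are illustrative only.
def _geary_c_dense_sketch(y, W):
    """C = (n-1) * sum_ij w_ij (y_i - y_j)^2 / (2 * S0 * sum_i (y_i - ybar)^2)."""
    n = len(y)
    s0 = W.sum()
    pairwise_sq_diff = (y[:, None] - y[None, :]) ** 2
    numerator = (n - 1) * (W * pairwise_sq_diff).sum()
    denominator = 2.0 * s0 * ((y - y.mean()) ** 2).sum()
    return numerator / denominator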
suite = unittest.TestSuite()
test_classes = [Geary_Tester]
for i in test_classes:
a = unittest.TestLoader().loadTestsFromTestCase(i)
suite.addTest(a)
if __name__ == '__main__':
runner = unittest.TextTestRunner()
runner.run(suite)
| bsd-3-clause |
jereze/scikit-learn | sklearn/preprocessing/data.py | 68 | 57385 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# License: BSD 3 clause
from itertools import chain, combinations
import numbers
import warnings
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..utils.sparsefuncs import (inplace_column_scale, mean_variance_axis,
min_max_axis, inplace_row_scale)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
"""Compute mean and std deviation for centering, scaling.
Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
"""
X = np.asarray(X)
Xr = np.rollaxis(X, axis)
if with_mean:
mean_ = Xr.mean(axis=0)
else:
mean_ = None
if with_std:
std_ = Xr.std(axis=0)
std_ = _handle_zeros_in_scale(std_)
else:
std_ = None
return mean_, std_
def _handle_zeros_in_scale(scale):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == 0:
scale = 1.
elif isinstance(scale, np.ndarray):
scale[scale == 0.0] = 1.0
scale[~np.isfinite(scale)] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like or CSR matrix.
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse='csr', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
            raise ValueError(
                "Cannot center sparse matrices: pass `with_mean=False` "
                "instead. See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
mean_, std_ = _mean_and_std(
X, axis, with_mean=with_mean, with_std=with_std)
if copy:
X = X.copy()
# Xr is a view on the original array that enables easy use of
# broadcasting on the axis in which we are interested in
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
Xr /= std_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# std_ is very small so that mean_2 = mean_1/std_ > 0, even if
# mean_1 was close to zero. The problem is thus essentially due
# to the lack of precision of mean_. A solution is then to
                # subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
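# Hedged usage sketch (not part of the scikit-learn API): after scale(), each column
# of a dense input has zero mean and unit variance; the array values are made up and
# the helper below exists only for illustration (it relies on the module-level numpy
# import above).
def _scale_usage_sketch():
    X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
    X_scaled = scale(X)
    # column means are ~0 and column standard deviations are ~1
    return X_scaled.mean(axis=0), X_scaled.std(axis=0)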
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
copy : boolean, optional, default True
Set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
data_min = np.min(X, axis=0)
data_range = np.max(X, axis=0) - data_min
data_range = _handle_zeros_in_scale(data_range)
self.scale_ = (feature_range[1] - feature_range[0]) / data_range
self.min_ = feature_range[0] - data_min * self.scale_
self.data_range = data_range
self.data_min = data_min
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like with shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False)
X -= self.min_
X /= self.scale_
return X
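# Hedged usage sketch (not part of the scikit-learn API): MinMaxScaler maps each
# feature's training minimum/maximum onto the requested feature_range; the data and
# helper name below are illustrative assumptions.
def _minmax_scaler_usage_sketch():
    X = np.array([[-1., 2.], [-0.5, 6.], [0., 10.], [1., 18.]])
    scaler = MinMaxScaler(feature_range=(0, 1))
    X_scaled = scaler.fit_transform(X)
    # the first row becomes [0., 0.] and the last row becomes [1., 1.]
    return X_scaled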
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
individual feature do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
that others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
std_ : array of floats with shape [n_features]
The standard deviation for each feature in the training set.
Set to one if the standard deviation is zero for a given feature.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : array-like or CSR matrix with shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse='csr', copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
self.mean_ = None
if self.with_std:
var = mean_variance_axis(X, axis=0)[1]
self.std_ = np.sqrt(var)
self.std_ = _handle_zeros_in_scale(self.std_)
else:
self.std_ = None
return self
else:
self.mean_, self.std_ = _mean_and_std(
X, axis=0, with_mean=self.with_mean, with_std=self.with_std)
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.std_ is not None:
inplace_column_scale(X, 1 / self.std_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.std_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'std_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
                raise ValueError(
                    "Cannot uncenter sparse matrices: pass `with_mean=False` "
                    "instead. See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.std_ is not None:
inplace_column_scale(X, self.std_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.std_
if self.with_mean:
X += self.mean_
return X
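# Hedged usage sketch (not part of the scikit-learn API): StandardScaler is stateful --
# the mean and standard deviation learned during fit() are reused on unseen data;
# the numbers below are made up for illustration.
def _standard_scaler_usage_sketch():
    X_train = np.array([[0., 0.], [0., 0.], [1., 1.], [1., 1.]])
    scaler = StandardScaler().fit(X_train)
    # mean_ is [0.5, 0.5] and std_ is [0.5, 0.5], so [[2., 2.]] maps to [[3., 3.]]
    return scaler.transform(np.array([[2., 2.]]))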
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
This estimator scales and translates each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0. It does not shift/center the data, and
thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
"""
def __init__(self, copy=True):
self.copy = copy
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
scales = np.maximum(np.abs(mins), np.abs(maxs))
else:
scales = np.abs(X).max(axis=0)
scales = np.array(scales)
scales = scales.reshape(-1)
self.scale_ = _handle_zeros_in_scale(scales)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : array-like or CSR matrix.
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
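# Hedged usage sketch (not part of the scikit-learn API): MaxAbsScaler only divides
# each feature by its largest absolute value, so zero entries stay zero and sparse
# inputs keep their sparsity pattern; the dense example below is made up.
def _maxabs_scaler_usage_sketch():
    X = np.array([[1., -2.], [2., 4.], [4., -8.]])
    # every column of the result has a maximum absolute value of exactly 1.0
    return MaxAbsScaler().fit_transform(X)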
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
s = MaxAbsScaler(copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like with shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, 1.0 / self.scale_)
                else:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like or CSR matrix.
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if sparse.issparse(X):
if self.with_scaling:
if X.shape[0] == 1:
inplace_row_scale(X, self.scale_)
else:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
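# Hedged usage sketch (not part of the scikit-learn API): because RobustScaler centers
# on the median and scales by the IQR, one extreme outlier barely disturbs the
# transform of the remaining points, unlike mean/variance scaling; data are made up.
def _robust_scaler_usage_sketch():
    X = np.array([[1.], [2.], [3.], [4.], [1000.]])
    robust = RobustScaler().fit_transform(X)      # first four rows stay well spread
    standard = StandardScaler().fit_transform(X)  # first four rows get squashed
    return robust, standard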
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like.
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default is True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0, 0, 1],
[ 1, 2, 3, 4, 6, 9],
[ 1, 4, 5, 16, 20, 25]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1, 0, 1, 0],
[ 1, 2, 3, 6],
[ 1, 4, 5, 20]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
return np.vstack(np.bincount(c, minlength=self.n_input_features_)
for c in combinations)
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array with shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
def normalize(X, norm='l2', axis=1, copy=True):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
return X
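# Hedged usage sketch (not part of the scikit-learn API): with the default l2 norm
# each row is rescaled to unit Euclidean length; the vectors below are made up.
def _normalize_usage_sketch():
    X = np.array([[3., 4.], [1., 0.]])
    X_normalized = normalize(X, norm='l2')
    # rows become [0.6, 0.8] and [1.0, 0.0], i.e. every row has norm 1
    return X_normalized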
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy
(if the input is already a numpy array or a scipy.sparse CSR / CSC
matrix and if axis is 1).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
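# Hedged usage sketch (not part of the scikit-learn API): values strictly greater than
# the threshold become 1, everything else becomes 0; the data below are made up.
def _binarize_usage_sketch():
    X = np.array([[1., -1., 2.], [2., 0., 0.], [0., 1., -1.]])
    # with threshold=0.0 the result is [[1, 0, 1], [1, 0, 0], [0, 1, 0]]
    return binarize(X, threshold=0.0)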
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
normalize to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K)
if copy:
K = K.copy()
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
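# Hedged aside (not part of the scikit-learn API): for a linear kernel K = X X^T,
# centering the kernel is equivalent to building the kernel of the column-centered
# data, so the two matrices returned below agree up to floating-point error; the
# data and helper name are illustrative assumptions.
def _kernel_centering_sketch():
    X = np.array([[1., 2.], [2., 0.], [0., 1.]])
    K_centered = KernelCenterer().fit_transform(np.dot(X, X.T))
    X_centered = X - X.mean(axis=0)
    return K_centered, np.dot(X_centered, X_centered.T)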
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : array or scipy.sparse matrix with shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : array or scipy.sparse matrix with shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'])
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : array-like or sparse matrix, shape=(n_samples, n_features)
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : maximum value for all features.
- array : maximum value per feature.
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
Whether to raise an error or ignore if a unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'float'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
                                % type(self.n_values))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that are known from fit,
        # i.e. those with values less than n_values_, selected via `mask`.
# This means, if self.handle_unknown is "ignore", the row_indices and
# col_indices corresponding to the unknown categorical feature are
# ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
                raise ValueError("handle_unknown should be either 'error' or "
                                 "'ignore', got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
                raise ValueError("unknown categorical feature(s) %s present "
                                 "during transform." % X[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| bsd-3-clause |
friebsch/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers | Chapter2_MorePyMC/separation_plot.py | 86 | 1494 | # separation plot
# Author: Cameron Davidson-Pilon,2013
# see http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
import matplotlib.pyplot as plt
import numpy as np
def separation_plot( p, y, **kwargs ):
"""
This function creates a separation plot for logistic and probit classification.
See http://mdwardlab.com/sites/default/files/GreenhillWardSacks.pdf
p: The proportions/probabilities, can be a nxM matrix which represents M models.
y: the 0-1 response variables.
"""
assert p.shape[0] == y.shape[0], "p.shape[0] != y.shape[0]"
n = p.shape[0]
try:
M = p.shape[1]
except:
p = p.reshape( n, 1 )
M = p.shape[1]
#colors = np.array( ["#fdf2db", "#e44a32"] )
colors_bmh = np.array( ["#eeeeee", "#348ABD"] )
fig = plt.figure( )#figsize = (8, 1.3*M) )
for i in range(M):
ax = fig.add_subplot(M, 1, i+1)
ix = np.argsort( p[:,i] )
#plot the different bars
bars = ax.bar( np.arange(n), np.ones(n), width=1.,
color = colors_bmh[ y[ix].astype(int) ],
edgecolor = 'none')
ax.plot( np.arange(n+1), np.append(p[ix,i], p[ix,i][-1]), "k",
linewidth = 1.,drawstyle="steps-post" )
#create expected value bar.
ax.vlines( [(1-p[ix,i]).sum()], [0], [1] )
#ax.grid(False)
#ax.axis('off')
plt.xlim( 0, n)
plt.tight_layout()
return
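# Hedged usage sketch (not part of the original module): synthetic probabilities and
# 0/1 outcomes, only to show the calling convention of separation_plot; the seed and
# sample size are arbitrary choices.
if __name__ == "__main__":
    np.random.seed(0)
    p_demo = np.random.uniform(0, 1, size=500)
    y_demo = np.random.binomial(1, p_demo)
    separation_plot(p_demo, y_demo)
    plt.show()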
| mit |
fw1121/BDA_py_demos | demos_ch3/demo3_2.py | 19 | 6319 | """Bayesian Data Analysis, 3rd ed
Chapter 3, demo 2
Visualise factored sampling and the corresponding marginal and conditional densities.
"""
from __future__ import division
import os
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
# import from utilities
import os
util_path = '../utilities_and_data' # provide path to utilities
util_path = os.path.abspath(util_path)
if util_path not in os.sys.path and os.path.exists(util_path):
os.sys.path.insert(0, util_path)
import sinvchi2
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
# data
y = np.array([93, 112, 122, 135, 122, 150, 118, 90, 124, 114])
# sufficient statistics
n = len(y)
s2 = np.var(y, ddof=1) # Here ddof=1 is used to get the sample estimate.
my = np.mean(y)
# Factorize the joint posterior p(mu,sigma2|y) to p(sigma2|y)p(mu|sigma2,y)
# Sample from the joint posterior using this factorization
# sample from p(sigma2|y)
nsamp = 1000
sigma2 = sinvchi2.rvs(n-1, s2, size=nsamp)
# sample from p(mu|sigma2,y)
mu = my + np.sqrt(sigma2/n)*np.random.randn(*sigma2.shape)
# display sigma instead of sigma2
sigma = np.sqrt(sigma2)
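# Hedged aside (not part of the original demo): quick Monte Carlo sanity checks of the
# factorization p(mu, sigma2 | y) = p(sigma2 | y) p(mu | sigma2, y); with finite nsamp
# these are only approximate, and the two expected values quoted below follow from the
# normal and scaled-inverse-chi-square marginals.
mc_mean_mu = np.mean(mu)          # should be close to E[mu|y] = my
mc_mean_sigma2 = np.mean(sigma2)  # should be close to E[sigma2|y] = (n-1)*s2/(n-3)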
# For mu compute the density in these points
tl1 = [90, 150]
t1 = np.linspace(tl1[0], tl1[1], 1000)
# For sigma compute the density in these points
tl2 = [10, 60]
t2 = np.linspace(tl2[0], tl2[1], 1000)
# evaluate the joint density in grid
# note that the following is not normalized, but for plotting
# contours it does not matter
Z = stats.norm.pdf(t1, my, t2[:,np.newaxis]/np.sqrt(n))
Z *= (sinvchi2.pdf(t2**2, n-1, s2)*2*t2)[:,np.newaxis]
# compute the exact marginal density for sigma
# multiplication by 2*t2 is due to the transformation of variable
# z=t2^2, see BDA3 p. 21
pm_sigma = sinvchi2.pdf(t2**2, n-1, s2)*2*t2
# N.B. this was already calculated in the joint distribution case
# ====== Illustrate the sampling with interactive plot
# create figure
plotgrid = gridspec.GridSpec(1, 2, width_ratios=[3,2])
fig = plt.figure(figsize=(12,8))
# plot the joint distribution
ax0 = plt.subplot(plotgrid[0,0])
# plot the contour plot of the exact posterior (c_levels is used to give
# a vector of linearly spaced values at which levels contours are drawn)
c_levels = np.linspace(1e-5, Z.max(), 6)[:-1]
plt.contour(t1, t2, Z, c_levels, colors='blue')
# decorate
plt.xlim(tl1)
plt.ylim(tl2)
plt.xlabel('$\mu$', fontsize=20)
plt.ylabel('$\sigma$', fontsize=20)
plt.title('joint posterior')
plt.legend((plt.Line2D([], [], color='blue'),), ('exact contour plot',))
# plot the marginal of sigma
ax1 = plt.subplot(plotgrid[0,1])
plt.plot(pm_sigma, t2, 'b', linewidth=1.5)
# decorate
plt.ylim(tl2)
plt.title('marginal of $\sigma$')
plt.xticks(())
# Function for interactively updating the figure
def update_figure(event):
if icontainer.stage == 0:
icontainer.stage += 1
# first sample of sigma2
line, = ax0.plot(tl1, [sigma[0], sigma[0]], 'k--', linewidth=1.5)
icontainer.legend_h.append(line)
icontainer.legend_s.append('sample from the marginal of $\sigma$')
icontainer.prev_line1 = line
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 1:
icontainer.stage += 1
# the conditional distribution of mu given sigma2
line, = ax0.plot(
t1,
sigma[0] + stats.norm.pdf(t1, my, np.sqrt(sigma2[0]/n))*100,
'g--',
linewidth=1.5
)
icontainer.legend_h.append(line)
icontainer.legend_s.append('conditional distribution of $\mu$')
icontainer.prev_line2 = line
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 2:
icontainer.stage += 1
# sample mu given sigma2
scat = ax0.scatter(mu[0], sigma[0], 40, color='g')
icontainer.legend_h.append(scat)
icontainer.legend_s.append('sample from joint posterior')
icontainer.prev_scat = scat
ax0.legend(icontainer.legend_h, icontainer.legend_s)
fig.canvas.draw()
elif icontainer.stage == 3:
# remove the previous lines
ax0.lines.remove(icontainer.prev_line1)
ax0.lines.remove(icontainer.prev_line2)
# resize the last scatter sample
icontainer.prev_scat.get_sizes()[0] = 8
# draw next sample
icontainer.i1 += 1
i1 = icontainer.i1
# first sample of sigma2
icontainer.prev_line1, = ax0.plot(
tl1, [sigma[i1], sigma[i1]], 'k--', linewidth=1.5
)
# the conditional distribution of mu given sigma2
icontainer.prev_line2, = ax0.plot(
t1,
sigma[i1] + stats.norm.pdf(t1, my, np.sqrt(sigma2[i1]/n))*100,
'g--',
linewidth=1.5
)
# sample mu given sigma2
icontainer.prev_scat = ax0.scatter(mu[i1], sigma[i1], 40, color='g')
# check if the last sample
if icontainer.i1 == icontainer.ndraw-1:
icontainer.stage += 1
fig.canvas.draw()
elif icontainer.stage == 4:
icontainer.stage += 1
# remove the previous lines
ax0.lines.remove(icontainer.prev_line1)
ax0.lines.remove(icontainer.prev_line2)
# resize the last scatter sample
icontainer.prev_scat.get_sizes()[0] = 8
# remove the helper text
plt.suptitle('')
# remove the extra legend entries
icontainer.legend_h.pop(2)
icontainer.legend_h.pop(1)
icontainer.legend_s.pop(2)
icontainer.legend_s.pop(1)
ax0.legend(icontainer.legend_h, icontainer.legend_s)
# plot the remaining samples
icontainer.i1 += 1
i1 = icontainer.i1
ax0.scatter(mu[i1:], sigma[i1:], 8, color='g')
fig.canvas.draw()
# Store the information of the current stage of the figure
class icontainer(object):
stage = 0
i1 = 0
legend_h = [plt.Line2D([], [], color='blue'),]
legend_s = ['exact contour plot',]
prev_line1 = None
prev_line2 = None
prev_scat = None
ndraw = 6
plt.suptitle('Press any key to continue', fontsize=20)
fig.canvas.mpl_connect('key_press_event', update_figure)
plt.show()
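# --- Illustrative sketch (not part of the original demo): the factored
# sampling used above, wrapped in a small helper without the interactive
# figure. The function name and nsamp_demo argument are hypothetical.
def _factored_sampling_sketch(y_data, nsamp_demo=1000):
    n_d = len(y_data)
    s2_d = np.var(y_data, ddof=1)
    my_d = np.mean(y_data)
    # sigma2 ~ p(sigma2 | y) = Scaled-Inv-Chi2(n-1, s2)
    sigma2_d = sinvchi2.rvs(n_d - 1, s2_d, size=nsamp_demo)
    # mu ~ p(mu | sigma2, y) = N(mean(y), sigma2/n)
    mu_d = my_d + np.sqrt(sigma2_d / n_d) * np.random.randn(nsamp_demo)
    return mu_d, sigma2_d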
| gpl-3.0 |
cmoutard/mne-python | logo/generate_mne_logos.py | 12 | 6091 | # -*- coding: utf-8 -*-
"""
===============================================================================
Script 'mne logo'
===============================================================================
This script makes the logo for MNE.
"""
# @author: drmccloy
# Created on Mon Jul 20 11:28:16 2015
# License: BSD (3-clause)
import numpy as np
import os.path as op
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.mlab import bivariate_normal
from matplotlib.path import Path
from matplotlib.text import TextPath
from matplotlib.patches import PathPatch
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.transforms import Bbox
# manually set values
dpi = 72.
center_fudge = np.array([2, 0]) # compensate for font bounding box padding
tagline_scale_fudge = 0.98 # to get justification right
tagline_offset_fudge = np.array([0.4, 0])
static_dir = op.join('..', 'doc', '_static')
# font, etc
rcp = {'font.sans-serif': ['Primetime'], 'font.style': 'normal',
'font.weight': 'black', 'font.variant': 'normal', 'figure.dpi': dpi,
'savefig.dpi': dpi, 'contour.negative_linestyle': 'solid'}
plt.rcdefaults()
rcParams.update(rcp)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(5, 3), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# fake field data
delta = 0.1
x = np.arange(-8.0, 8.0, delta)
y = np.arange(-3.0, 3.0, delta)
X, Y = np.meshgrid(x, y)
Z1 = bivariate_normal(X, Y, 8.0, 7.0, -5.0, 0.9, 1.0)
Z2 = bivariate_normal(X, Y, 15.0, 2.5, 2.6, -2.5, 2.5)
Z = Z2 - 0.7 * Z1
# color map: field gradient (yellow-red-transparent-blue-cyan)
yrtbc = {'red': ((0.0, 1.0, 1.0), (0.5, 1.0, 0.0), (1.0, 0.0, 0.0)),
'blue': ((0.0, 0.0, 0.0), (0.5, 0.0, 1.0), (1.0, 1.0, 1.0)),
'green': ((0.0, 1.0, 1.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)),
'alpha': ((0.0, 1.0, 1.0), (0.4, 0.8, 0.8), (0.5, 0.2, 0.2),
(0.6, 0.8, 0.8), (1.0, 1.0, 1.0))}
# color map: field lines (red | blue)
redbl = {'red': ((0., 1., 1.), (0.5, 1., 0.), (1., 0., 0.)),
'blue': ((0., 0., 0.), (0.5, 0., 1.), (1., 1., 1.)),
'green': ((0., 0., 0.), (1., 0., 0.)),
'alpha': ((0., 0.4, 0.4), (1., 0.4, 0.4))}
mne_field_grad_cols = LinearSegmentedColormap('mne_grad', yrtbc)
mne_field_line_cols = LinearSegmentedColormap('mne_line', redbl)
# plot gradient and contour lines
im = plt.imshow(Z, cmap=mne_field_grad_cols, aspect='equal')
cs = plt.contour(Z, 9, cmap=mne_field_line_cols, linewidths=1)
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# create MNE clipping mask
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = plot_dims / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
# apply clipping mask to field gradient and lines
im.set_clip_path(mne_clip, transform=im.get_transform())
for coll in cs.collections:
coll.set_clip_path(mne_clip, transform=im.get_transform())
# get final position of clipping mask
mne_corners = mne_clip.get_extents().corners()
# add tagline
rcParams.update({'font.sans-serif': ['Cooper Hewitt'], 'font.weight': 'light'})
tag_path = TextPath((0, 0), 'MEG + EEG ANALYSIS & VISUALIZATION')
dims = tag_path.vertices.max(0) - tag_path.vertices.min(0)
vert = tag_path.vertices - dims / 2.
mult = tagline_scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted
offset = mne_corners[-1] - np.array([mne_clip.get_extents().size[0] / 2.,
-dims[1]]) - tagline_offset_fudge
tag_clip = Path(offset + vert * mult, tag_path.codes)
tag_patch = PathPatch(tag_clip, facecolor='k', edgecolor='none', zorder=10)
ax.add_patch(tag_patch)
yl = ax.get_ylim()
yy = np.max([tag_clip.vertices.max(0)[-1],
tag_clip.vertices.min(0)[-1]])
ax.set_ylim(np.ceil(yy), yl[-1])
# only save actual image extent plus a bit of padding
extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
plt.draw()
plt.savefig(op.join(static_dir, 'mne_logo.png'),
bbox_inches=extent.expanded(1.2, 1.))
plt.close()
# 92x22 image
w_px = 92
h_px = 22
center_fudge = np.array([12, 0.5])
scale_fudge = 2.1
rcParams.update({'font.sans-serif': ['Primetime'], 'font.weight': 'black'})
x = np.linspace(-8., 8., w_px / 2.)
y = np.linspace(-3., 3., h_px / 2.)
X, Y = np.meshgrid(x, y)
# initialize figure (no axes, margins, etc)
fig = plt.figure(1, figsize=(w_px / dpi, h_px / dpi), frameon=False, dpi=dpi)
ax = plt.Axes(fig, [0., 0., 1., 1.])
ax.set_axis_off()
fig.add_axes(ax)
# plot rainbow
im = plt.imshow(X, cmap=mne_field_grad_cols, aspect='equal')
plot_dims = np.r_[np.diff(ax.get_xbound()), np.diff(ax.get_ybound())]
# MNE text in white
mne_path = TextPath((0, 0), 'MNE')
dims = mne_path.vertices.max(0) - mne_path.vertices.min(0)
vert = mne_path.vertices - dims / 2.
mult = scale_fudge * (plot_dims / dims).min()
mult = [mult, -mult] # y axis is inverted (origin at top left)
offset = np.array([scale_fudge, 1.]) * \
np.array([-dims[0], plot_dims[-1]]) / 2. - center_fudge
mne_clip = Path(offset + vert * mult, mne_path.codes)
mne_patch = PathPatch(mne_clip, facecolor='w', edgecolor='none', zorder=10)
ax.add_patch(mne_patch)
# adjust xlim and ylim
mne_corners = mne_clip.get_extents().corners()
xmin, ymin = np.min(mne_corners, axis=0)
xmax, ymax = np.max(mne_corners, axis=0)
xl = ax.get_xlim()
yl = ax.get_ylim()
xpad = np.abs(np.diff([xmin, xl[1]])) / 20.
ypad = np.abs(np.diff([ymax, ymin])) / 20.
ax.set_xlim(xmin - xpad, xl[1] + xpad)
ax.set_ylim(ymax + ypad, ymin - ypad)
extent = Bbox(np.c_[ax.get_xlim(), ax.get_ylim()])
extent = extent.transformed(ax.transData + fig.dpi_scale_trans.inverted())
plt.draw()
plt.savefig(op.join(static_dir, 'mne_logo_small.png'), transparent=True,
bbox_inches=extent)
plt.close()
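# --- Minimal sketch added for illustration (not part of the original
# script): clipping an image to the outline of a TextPath, the same trick
# used above for the MNE mask. All names and numbers here are arbitrary.
def _text_clip_sketch(word='MNE'):
    fig_s, ax_s = plt.subplots(figsize=(4, 2))
    ax_s.set_axis_off()
    img = ax_s.imshow(np.random.rand(30, 90), cmap='viridis', aspect='equal')
    path = TextPath((0, 0), word)
    verts = path.vertices - path.vertices.min(0)
    scale = 80. / verts.max(0)[0]            # scale the text to ~80 data units wide
    clip = Path(verts * [scale, scale] + [5, 5], path.codes)
    img.set_clip_path(clip, transform=img.get_transform())
    return fig_s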
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/metrics/tests/test_pairwise.py | 13 | 26241 | import numpy as np
from numpy import linalg
from scipy.sparse import dok_matrix, csr_matrix, issparse
from scipy.spatial.distance import cosine, cityblock, minkowski, wminkowski
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.externals.six import iteritems
from sklearn.metrics.pairwise import euclidean_distances
from sklearn.metrics.pairwise import manhattan_distances
from sklearn.metrics.pairwise import linear_kernel
from sklearn.metrics.pairwise import chi2_kernel, additive_chi2_kernel
from sklearn.metrics.pairwise import polynomial_kernel
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics.pairwise import laplacian_kernel
from sklearn.metrics.pairwise import sigmoid_kernel
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.metrics.pairwise import cosine_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.metrics.pairwise import pairwise_distances_argmin_min
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.metrics.pairwise import pairwise_kernels
from sklearn.metrics.pairwise import PAIRWISE_KERNEL_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from sklearn.metrics.pairwise import PAIRWISE_BOOLEAN_FUNCTIONS
from sklearn.metrics.pairwise import PAIRED_DISTANCES
from sklearn.metrics.pairwise import check_pairwise_arrays
from sklearn.metrics.pairwise import check_paired_arrays
from sklearn.metrics.pairwise import paired_distances
from sklearn.metrics.pairwise import paired_euclidean_distances
from sklearn.metrics.pairwise import paired_manhattan_distances
from sklearn.preprocessing import normalize
from sklearn.exceptions import DataConversionWarning
def test_pairwise_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
S = pairwise_distances(X, metric="euclidean")
S2 = euclidean_distances(X)
assert_array_almost_equal(S, S2)
# Euclidean distance, with Y != X.
Y = rng.random_sample((2, 4))
S = pairwise_distances(X, Y, metric="euclidean")
S2 = euclidean_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
S2 = pairwise_distances(X_tuples, Y_tuples, metric="euclidean")
assert_array_almost_equal(S, S2)
# "cityblock" uses scikit-learn metric, cityblock (function) is
# scipy.spatial.
S = pairwise_distances(X, metric="cityblock")
S2 = pairwise_distances(X, metric=cityblock)
assert_equal(S.shape[0], S.shape[1])
assert_equal(S.shape[0], X.shape[0])
assert_array_almost_equal(S, S2)
# The manhattan metric should be equivalent to cityblock.
S = pairwise_distances(X, Y, metric="manhattan")
S2 = pairwise_distances(X, Y, metric=cityblock)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Low-level function for manhattan can divide in blocks to avoid
# using too much memory during the broadcasting
S3 = manhattan_distances(X, Y, size_threshold=10)
assert_array_almost_equal(S, S3)
# Test cosine as a string metric versus cosine callable
# The string "cosine" uses sklearn.metric,
# while the function cosine is scipy.spatial
S = pairwise_distances(X, Y, metric="cosine")
S2 = pairwise_distances(X, Y, metric=cosine)
assert_equal(S.shape[0], X.shape[0])
assert_equal(S.shape[1], Y.shape[0])
assert_array_almost_equal(S, S2)
# Test with sparse X and Y,
# currently only supported for Euclidean, L1 and cosine.
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
S = pairwise_distances(X_sparse, Y_sparse, metric="euclidean")
S2 = euclidean_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse, metric="cosine")
S2 = cosine_distances(X_sparse, Y_sparse)
assert_array_almost_equal(S, S2)
S = pairwise_distances(X_sparse, Y_sparse.tocsc(), metric="manhattan")
S2 = manhattan_distances(X_sparse.tobsr(), Y_sparse.tocoo())
assert_array_almost_equal(S, S2)
S2 = manhattan_distances(X, Y)
assert_array_almost_equal(S, S2)
# Test with scipy.spatial.distance metric, with a kwd
kwds = {"p": 2.0}
S = pairwise_distances(X, Y, metric="minkowski", **kwds)
S2 = pairwise_distances(X, Y, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# same with Y = None
kwds = {"p": 2.0}
S = pairwise_distances(X, metric="minkowski", **kwds)
S2 = pairwise_distances(X, metric=minkowski, **kwds)
assert_array_almost_equal(S, S2)
# Test that scipy distance metrics throw an error if sparse matrix given
assert_raises(TypeError, pairwise_distances, X_sparse, metric="minkowski")
assert_raises(TypeError, pairwise_distances, X, Y_sparse,
metric="minkowski")
# Test that a value error is raised if the metric is unknown
assert_raises(ValueError, pairwise_distances, X, Y, metric="blah")
# ignore conversion to boolean in pairwise_distances
@ignore_warnings(category=DataConversionWarning)
def test_pairwise_boolean_distance():
# test that we convert to boolean arrays for boolean distances
rng = np.random.RandomState(0)
X = rng.randn(5, 4)
Y = X.copy()
Y[0, 0] = 1 - Y[0, 0]
for metric in PAIRWISE_BOOLEAN_FUNCTIONS:
for Z in [Y, None]:
res = pairwise_distances(X, Z, metric=metric)
res[np.isnan(res)] = 0
assert_true(np.sum(res != 0) == 0)
def test_pairwise_precomputed():
for func in [pairwise_distances, pairwise_kernels]:
# Test correct shape
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), metric='precomputed')
# with two args
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 4)),
metric='precomputed')
# even if shape[1] agrees (although thus second arg is spurious)
assert_raises_regexp(ValueError, '.* shape .*',
func, np.zeros((5, 3)), np.zeros((4, 3)),
metric='precomputed')
# Test not copied (if appropriate dtype)
S = np.zeros((5, 5))
S2 = func(S, metric="precomputed")
assert_true(S is S2)
# with two args
S = np.zeros((5, 3))
S2 = func(S, np.zeros((3, 3)), metric="precomputed")
assert_true(S is S2)
# Test always returns float dtype
S = func(np.array([[1]], dtype='int'), metric='precomputed')
assert_equal('f', S.dtype.kind)
# Test converts list to array-like
S = func([[1.]], metric='precomputed')
assert_true(isinstance(S, np.ndarray))
def check_pairwise_parallel(func, metric, kwds):
rng = np.random.RandomState(0)
for make_data in (np.array, csr_matrix):
X = make_data(rng.random_sample((5, 4)))
Y = make_data(rng.random_sample((3, 4)))
try:
S = func(X, metric=metric, n_jobs=1, **kwds)
except (TypeError, ValueError) as exc:
# Not all metrics support sparse input
# ValueError may be triggered by bad callable
if make_data is csr_matrix:
assert_raises(type(exc), func, X, metric=metric,
n_jobs=2, **kwds)
continue
else:
raise
S2 = func(X, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
S = func(X, Y, metric=metric, n_jobs=1, **kwds)
S2 = func(X, Y, metric=metric, n_jobs=2, **kwds)
assert_array_almost_equal(S, S2)
def test_pairwise_parallel():
wminkowski_kwds = {'w': np.arange(1, 5).astype('double'), 'p': 1}
metrics = [(pairwise_distances, 'euclidean', {}),
(pairwise_distances, wminkowski, wminkowski_kwds),
(pairwise_distances, 'wminkowski', wminkowski_kwds),
(pairwise_kernels, 'polynomial', {'degree': 1}),
(pairwise_kernels, callable_rbf_kernel, {'gamma': .1}),
]
for func, metric, kwds in metrics:
yield check_pairwise_parallel, func, metric, kwds
def test_pairwise_callable_nonstrict_metric():
# paired_distances should allow callable metric where metric(x, x) != 0
# Knowing that the callable is a strict metric would allow the diagonal to
# be left uncalculated and set to 0.
assert_equal(pairwise_distances([[1.]], metric=lambda x, y: 5)[0, 0], 5)
def callable_rbf_kernel(x, y, **kwds):
# Callable version of pairwise.rbf_kernel.
K = rbf_kernel(np.atleast_2d(x), np.atleast_2d(y), **kwds)
return K
def test_pairwise_kernels(): # Test the pairwise_kernels helper function.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
# Test with all metrics that should be in PAIRWISE_KERNEL_FUNCTIONS.
test_metrics = ["rbf", "laplacian", "sigmoid", "polynomial", "linear",
"chi2", "additive_chi2"]
for metric in test_metrics:
function = PAIRWISE_KERNEL_FUNCTIONS[metric]
# Test with Y=None
K1 = pairwise_kernels(X, metric=metric)
K2 = function(X)
assert_array_almost_equal(K1, K2)
# Test with Y=Y
K1 = pairwise_kernels(X, Y=Y, metric=metric)
K2 = function(X, Y=Y)
assert_array_almost_equal(K1, K2)
# Test with tuples as X and Y
X_tuples = tuple([tuple([v for v in row]) for row in X])
Y_tuples = tuple([tuple([v for v in row]) for row in Y])
K2 = pairwise_kernels(X_tuples, Y_tuples, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with sparse X and Y
X_sparse = csr_matrix(X)
Y_sparse = csr_matrix(Y)
if metric in ["chi2", "additive_chi2"]:
# these don't support sparse matrices yet
assert_raises(ValueError, pairwise_kernels,
X_sparse, Y=Y_sparse, metric=metric)
continue
K1 = pairwise_kernels(X_sparse, Y=Y_sparse, metric=metric)
assert_array_almost_equal(K1, K2)
# Test with a callable function, with given keywords.
metric = callable_rbf_kernel
kwds = {'gamma': 0.1}
K1 = pairwise_kernels(X, Y=Y, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=Y, **kwds)
assert_array_almost_equal(K1, K2)
# callable function, X=Y
K1 = pairwise_kernels(X, Y=X, metric=metric, **kwds)
K2 = rbf_kernel(X, Y=X, **kwds)
assert_array_almost_equal(K1, K2)
def test_pairwise_kernels_filter_param():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((2, 4))
K = rbf_kernel(X, Y, gamma=0.1)
params = {"gamma": 0.1, "blabla": ":)"}
K2 = pairwise_kernels(X, Y, metric="rbf", filter_params=True, **params)
assert_array_almost_equal(K, K2)
assert_raises(TypeError, pairwise_kernels, X, Y, "rbf", **params)
def test_paired_distances():
# Test the pairwise_distance helper function.
rng = np.random.RandomState(0)
# Euclidean distance should be equivalent to calling the function.
X = rng.random_sample((5, 4))
# Euclidean distance, with Y != X.
Y = rng.random_sample((5, 4))
for metric, func in iteritems(PAIRED_DISTANCES):
S = paired_distances(X, Y, metric=metric)
S2 = func(X, Y)
assert_array_almost_equal(S, S2)
S3 = func(csr_matrix(X), csr_matrix(Y))
assert_array_almost_equal(S, S3)
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
# Check the pairwise_distances implementation
# gives the same value
distances = PAIRWISE_DISTANCE_FUNCTIONS[metric](X, Y)
distances = np.diag(distances)
assert_array_almost_equal(distances, S)
# Check the callable implementation
S = paired_distances(X, Y, metric='manhattan')
S2 = paired_distances(X, Y, metric=lambda x, y: np.abs(x - y).sum(axis=0))
assert_array_almost_equal(S, S2)
# Test that a value error is raised when the lengths of X and Y should not
# differ
Y = rng.random_sample((3, 4))
assert_raises(ValueError, paired_distances, X, Y)
def test_pairwise_distances_argmin_min():
# Check pairwise minimum distances computation for any metric
X = [[0], [1]]
Y = [[-1], [2]]
Xsp = dok_matrix(X)
Ysp = csr_matrix(Y, dtype=np.float32)
# euclidean metric
D, E = pairwise_distances_argmin_min(X, Y, metric="euclidean")
D2 = pairwise_distances_argmin(X, Y, metric="euclidean")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# sparse matrix case
Dsp, Esp = pairwise_distances_argmin_min(Xsp, Ysp, metric="euclidean")
assert_array_equal(Dsp, D)
assert_array_equal(Esp, E)
# We don't want np.matrix here
assert_equal(type(Dsp), np.ndarray)
assert_equal(type(Esp), np.ndarray)
# Non-euclidean scikit-learn metric
D, E = pairwise_distances_argmin_min(X, Y, metric="manhattan")
D2 = pairwise_distances_argmin(X, Y, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(D2, [0, 1])
assert_array_almost_equal(E, [1., 1.])
D, E = pairwise_distances_argmin_min(Xsp, Ysp, metric="manhattan")
D2 = pairwise_distances_argmin(Xsp, Ysp, metric="manhattan")
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (callable)
D, E = pairwise_distances_argmin_min(X, Y, metric=minkowski,
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Non-euclidean Scipy distance (string)
D, E = pairwise_distances_argmin_min(X, Y, metric="minkowski",
metric_kwargs={"p": 2})
assert_array_almost_equal(D, [0, 1])
assert_array_almost_equal(E, [1., 1.])
# Compare with naive implementation
rng = np.random.RandomState(0)
X = rng.randn(97, 149)
Y = rng.randn(111, 149)
dist = pairwise_distances(X, Y, metric="manhattan")
dist_orig_ind = dist.argmin(axis=0)
dist_orig_val = dist[dist_orig_ind, range(len(dist_orig_ind))]
dist_chunked_ind, dist_chunked_val = pairwise_distances_argmin_min(
X, Y, axis=0, metric="manhattan", batch_size=50)
np.testing.assert_almost_equal(dist_orig_ind, dist_chunked_ind, decimal=7)
np.testing.assert_almost_equal(dist_orig_val, dist_chunked_val, decimal=7)
def test_euclidean_distances():
# Check the pairwise Euclidean distances computation
X = [[0]]
Y = [[1], [2]]
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
X = csr_matrix(X)
Y = csr_matrix(Y)
D = euclidean_distances(X, Y)
assert_array_almost_equal(D, [[1., 2.]])
rng = np.random.RandomState(0)
X = rng.random_sample((10, 4))
Y = rng.random_sample((20, 4))
X_norm_sq = (X ** 2).sum(axis=1).reshape(1, -1)
Y_norm_sq = (Y ** 2).sum(axis=1).reshape(1, -1)
# check that we still get the right answers with {X,Y}_norm_squared
D1 = euclidean_distances(X, Y)
D2 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq)
D3 = euclidean_distances(X, Y, Y_norm_squared=Y_norm_sq)
D4 = euclidean_distances(X, Y, X_norm_squared=X_norm_sq,
Y_norm_squared=Y_norm_sq)
assert_array_almost_equal(D2, D1)
assert_array_almost_equal(D3, D1)
assert_array_almost_equal(D4, D1)
# check we get the wrong answer with wrong {X,Y}_norm_squared
X_norm_sq *= 0.5
Y_norm_sq *= 0.5
wrong_D = euclidean_distances(X, Y,
X_norm_squared=np.zeros_like(X_norm_sq),
Y_norm_squared=np.zeros_like(Y_norm_sq))
assert_greater(np.max(np.abs(wrong_D - D1)), .01)
# Paired distances
def test_paired_euclidean_distances():
# Check the paired Euclidean distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_euclidean_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_paired_manhattan_distances():
# Check the paired manhattan distances computation
X = [[0], [0]]
Y = [[1], [2]]
D = paired_manhattan_distances(X, Y)
assert_array_almost_equal(D, [1., 2.])
def test_chi_square_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((10, 4))
K_add = additive_chi2_kernel(X, Y)
gamma = 0.1
K = chi2_kernel(X, Y, gamma=gamma)
assert_equal(K.dtype, np.float)
for i, x in enumerate(X):
for j, y in enumerate(Y):
chi2 = -np.sum((x - y) ** 2 / (x + y))
chi2_exp = np.exp(gamma * chi2)
assert_almost_equal(K_add[i, j], chi2)
assert_almost_equal(K[i, j], chi2_exp)
# check diagonal is ones for data with itself
K = chi2_kernel(Y)
assert_array_equal(np.diag(K), 1)
# check off-diagonal is < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
# check that float32 is preserved
X = rng.random_sample((5, 4)).astype(np.float32)
Y = rng.random_sample((10, 4)).astype(np.float32)
K = chi2_kernel(X, Y)
assert_equal(K.dtype, np.float32)
# check integer type gets converted,
# check that zeros are handled
X = rng.random_sample((10, 4)).astype(np.int32)
K = chi2_kernel(X, X)
assert_true(np.isfinite(K).all())
assert_equal(K.dtype, np.float)
# check that kernel of similar things is greater than dissimilar ones
X = [[.3, .7], [1., 0]]
Y = [[0, 1], [.9, .1]]
K = chi2_kernel(X, Y)
assert_greater(K[0, 0], K[0, 1])
assert_greater(K[1, 1], K[1, 0])
# test negative input
assert_raises(ValueError, chi2_kernel, [[0, -1]])
assert_raises(ValueError, chi2_kernel, [[0, -1]], [[-1, -1]])
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[-1, -1]])
# different n_features in X and Y
assert_raises(ValueError, chi2_kernel, [[0, 1]], [[.2, .2, .6]])
# sparse matrices
assert_raises(ValueError, chi2_kernel, csr_matrix(X), csr_matrix(Y))
assert_raises(ValueError, additive_chi2_kernel,
csr_matrix(X), csr_matrix(Y))
def test_kernel_symmetry():
# Valid kernels should be symmetric
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
assert_array_almost_equal(K, K.T, 15)
def test_kernel_sparse():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
X_sparse = csr_matrix(X)
for kernel in (linear_kernel, polynomial_kernel, rbf_kernel,
laplacian_kernel, sigmoid_kernel, cosine_similarity):
K = kernel(X, X)
K2 = kernel(X_sparse, X_sparse)
assert_array_almost_equal(K, K2)
def test_linear_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = linear_kernel(X, X)
# the diagonal elements of a linear kernel are their squared norm
assert_array_almost_equal(K.flat[::6], [linalg.norm(x) ** 2 for x in X])
def test_rbf_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = rbf_kernel(X, X)
# the diagonal elements of a rbf kernel are 1
assert_array_almost_equal(K.flat[::6], np.ones(5))
def test_laplacian_kernel():
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
K = laplacian_kernel(X, X)
# the diagonal elements of a laplacian kernel are 1
assert_array_almost_equal(np.diag(K), np.ones(5))
# off-diagonal elements are < 1 but > 0:
assert_true(np.all(K > 0))
assert_true(np.all(K - np.diag(np.diag(K)) < 1))
def test_cosine_similarity_sparse_output():
# Test if cosine_similarity correctly produces sparse output.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
K1 = cosine_similarity(Xcsr, Ycsr, dense_output=False)
assert_true(issparse(K1))
K2 = pairwise_kernels(Xcsr, Y=Ycsr, metric="cosine")
assert_array_almost_equal(K1.todense(), K2)
def test_cosine_similarity():
# Test the cosine_similarity.
rng = np.random.RandomState(0)
X = rng.random_sample((5, 4))
Y = rng.random_sample((3, 4))
Xcsr = csr_matrix(X)
Ycsr = csr_matrix(Y)
for X_, Y_ in ((X, None), (X, Y),
(Xcsr, None), (Xcsr, Ycsr)):
# Test that the cosine is kernel is equal to a linear kernel when data
# has been previously normalized by L2-norm.
K1 = pairwise_kernels(X_, Y=Y_, metric="cosine")
X_ = normalize(X_)
if Y_ is not None:
Y_ = normalize(Y_)
K2 = pairwise_kernels(X_, Y=Y_, metric="linear")
assert_array_almost_equal(K1, K2)
def test_check_dense_matrices():
# Ensure that pairwise array check works for dense matrices.
# Check that if XB is None, XB is returned as reference to XA
XA = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_true(XA_checked is XB_checked)
assert_array_equal(XA, XA_checked)
def test_check_XB_returned():
# Ensure that if XA and XB are given correctly, they return as equal.
# Check that if XB is not None, it is returned equal.
# Note that the second dimension of XB is the same as XA.
XA = np.resize(np.arange(40), (5, 8))
XB = np.resize(np.arange(32), (4, 8))
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
XB = np.resize(np.arange(40), (5, 8))
XA_checked, XB_checked = check_paired_arrays(XA, XB)
assert_array_equal(XA, XA_checked)
assert_array_equal(XB, XB_checked)
def test_check_different_dimensions():
# Ensure an error is raised if the dimensions are different.
XA = np.resize(np.arange(45), (5, 9))
XB = np.resize(np.arange(32), (4, 8))
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XB = np.resize(np.arange(4 * 9), (4, 9))
assert_raises(ValueError, check_paired_arrays, XA, XB)
def test_check_invalid_dimensions():
# Ensure an error is raised on 1D input arrays.
# The modified tests are not 1D. In the old test, the array was internally
# converted to 2D anyways
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
XA = np.arange(45).reshape(9, 5)
XB = np.arange(32).reshape(4, 8)
assert_raises(ValueError, check_pairwise_arrays, XA, XB)
def test_check_sparse_arrays():
# Ensures that checks return valid sparse matrices.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_sparse = csr_matrix(XA)
XB = rng.random_sample((5, 4))
XB_sparse = csr_matrix(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_sparse, XB_sparse)
# compare their difference because testing csr matrices for
# equality with '==' does not work as expected.
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XB_checked))
assert_equal(abs(XB_sparse - XB_checked).sum(), 0)
XA_checked, XA_2_checked = check_pairwise_arrays(XA_sparse, XA_sparse)
assert_true(issparse(XA_checked))
assert_equal(abs(XA_sparse - XA_checked).sum(), 0)
assert_true(issparse(XA_2_checked))
assert_equal(abs(XA_2_checked - XA_checked).sum(), 0)
def tuplify(X):
# Turns a numpy matrix (any n-dimensional array) into tuples.
s = X.shape
if len(s) > 1:
# Tuplify each sub-array in the input.
return tuple(tuplify(row) for row in X)
else:
# Single dimension input, just return tuple of contents.
return tuple(r for r in X)
def test_check_tuple_input():
# Ensures that checks return valid tuples.
rng = np.random.RandomState(0)
XA = rng.random_sample((5, 4))
XA_tuples = tuplify(XA)
XB = rng.random_sample((5, 4))
XB_tuples = tuplify(XB)
XA_checked, XB_checked = check_pairwise_arrays(XA_tuples, XB_tuples)
assert_array_equal(XA_tuples, XA_checked)
assert_array_equal(XB_tuples, XB_checked)
def test_check_preserve_type():
# Ensures that type float32 is preserved.
XA = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XB = np.resize(np.arange(40), (5, 8)).astype(np.float32)
XA_checked, XB_checked = check_pairwise_arrays(XA, None)
assert_equal(XA_checked.dtype, np.float32)
# both float32
XA_checked, XB_checked = check_pairwise_arrays(XA, XB)
assert_equal(XA_checked.dtype, np.float32)
assert_equal(XB_checked.dtype, np.float32)
# mismatched A
XA_checked, XB_checked = check_pairwise_arrays(XA.astype(np.float),
XB)
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
# mismatched B
XA_checked, XB_checked = check_pairwise_arrays(XA,
XB.astype(np.float))
assert_equal(XA_checked.dtype, np.float)
assert_equal(XB_checked.dtype, np.float)
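# --- Illustrative sketch (not one of the test cases above): typical use of
# the pairwise helpers exercised by this module. Shapes and parameters are
# arbitrary.
def _pairwise_usage_sketch():
    rng = np.random.RandomState(42)
    X = rng.random_sample((6, 3))
    Y = rng.random_sample((4, 3))
    D = pairwise_distances(X, Y, metric="euclidean")      # (6, 4) distance matrix
    K = pairwise_kernels(X, Y, metric="rbf", gamma=0.5)   # (6, 4) kernel matrix
    return D, K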
| bsd-3-clause |
rosswhitfield/mantid | Framework/PythonInterface/mantid/plots/resampling_image/samplingimage.py | 3 | 10843 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2020 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
import matplotlib.colors
import numpy as np
from mantid.plots.datafunctions import get_matrix_2d_ragged, get_normalize_by_bin_width
from mantid.plots.mantidimage import MantidImage
from mantid.api import MatrixWorkspace
MAX_HISTOGRAMS = 5000
class SamplingImage(MantidImage):
def __init__(self,
ax,
workspace,
transpose=False,
cmap=None,
norm=None,
interpolation=None,
origin=None,
extent=None,
filternorm=1,
filterrad=4.0,
resample=False,
normalize_by_bin_width=None,
**kwargs):
super().__init__(ax,
cmap=cmap,
norm=norm,
interpolation=interpolation,
origin=origin,
extent=extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
**kwargs)
self.ws = workspace
try:
self.spectrum_info = workspace.spectrumInfo()
except Exception:
self.spectrum_info = None
self.transpose = transpose
self.normalize_by_bin_width = normalize_by_bin_width
self._resize_cid, self._xlim_cid, self._ylim_cid = None, None, None
self._resample_required = True
self._full_extent = extent
self.orig_shape = (workspace.getDimension(0).getNBins(),
workspace.getDimension(1).getNBins())
self._xbins, self._ybins = 100, 100
self.origin = origin
self._update_maxpooling_option()
def connect_events(self):
axes = self.axes
self._resize_cid = axes.get_figure().canvas.mpl_connect('resize_event', self._resize)
self._xlim_cid = axes.callbacks.connect('xlim_changed', self._xlim_changed)
self._ylim_cid = axes.callbacks.connect('ylim_changed', self._ylim_changed)
def disconnect_events(self):
axes = self.axes
axes.get_figure().canvas.mpl_disconnect(self._resize_cid)
axes.callbacks.disconnect(self._xlim_cid)
axes.callbacks.disconnect(self._ylim_cid)
def draw(self, renderer, *args, **kwargs):
if self._resample_required:
self._resample_image()
self._resample_required = False
super().draw(renderer, *args, **kwargs)
def remove(self):
self.disconnect_events()
super().remove()
def _xlim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _ylim_changed(self, ax):
if self._update_extent():
self._resample_required = True
def _resize(self, canvas):
xbins, ybins = self._calculate_bins_from_extent()
if xbins > self._xbins or ybins > self._ybins:
self._resample_required = True
def _calculate_bins_from_extent(self):
bbox = self.get_window_extent().transformed(
self.axes.get_figure().dpi_scale_trans.inverted())
dpi = self.axes.get_figure().dpi
xbins = int(np.ceil(bbox.width * dpi))
ybins = int(np.ceil(bbox.height * dpi))
return xbins, ybins
def _resample_image(self, xbins=None, ybins=None):
if self._resample_required:
extent = self.get_extent()
if xbins is None or ybins is None:
xbins, ybins = self._calculate_bins_from_extent()
x, y, data = get_matrix_2d_ragged(self.ws,
self.normalize_by_bin_width,
histogram2D=True,
transpose=self.transpose,
extent=extent,
xbins=xbins,
ybins=ybins,
spec_info=self.spectrum_info,
maxpooling=self._maxpooling)
# Data is an MxN matrix.
# If origin = upper extent is set as [xmin, xmax, ymax, ymin].
# Data[M,0] is the data at [xmin, ymin], which should be drawn at the top left corner,
# whereas Data[0,0] is the data at [xmin, ymax], which should be drawn at the bottom left corner.
# Origin upper starts drawing the data from top-left, which means we need to horizontally flip the matrix
if self.origin == "upper":
data = np.flip(data, 0)
self.set_data(data)
self._xbins = xbins
self._ybins = ybins
def _update_extent(self):
"""
        Update the extent based on xlim and ylim; should be called after a pan or zoom action,
        since this limits the range over which the data will be sampled. Returns True if the extent changed, False otherwise.
"""
new_extent = self.axes.get_xlim() + self.axes.get_ylim()
if new_extent != self.get_extent():
self.set_extent(new_extent)
return True
else:
return False
def get_full_extent(self):
return self._full_extent
def _update_maxpooling_option(self):
"""
        Updates the maxpooling option, used when the image is downsampled.
        If the workspace is large or ragged, the maxpooling step is skipped and the option is set to False.
"""
axis = self.ws.getAxis(1)
self._maxpooling = (self.ws.getNumberHistograms() <= MAX_HISTOGRAMS and axis.isSpectra()
and not self.ws.isRaggedWorkspace())
def imshow_sampling(axes,
workspace,
cmap=None,
alpha=None,
vmin=None,
vmax=None,
shape=None,
filternorm=1,
filterrad=4.0,
imlim=None,
url=None,
**kwargs):
"""Copy of imshow but replaced AxesImage with SamplingImage and added
callbacks and Mantid Workspace stuff.
See :meth:`matplotlib.axes.Axes.imshow`
To test:
from mantidqt.widgets.sliceviewer.samplingimage import imshow_sampling
fig, ax = plt.subplots()
im = imshow_sampling(ax, workspace, aspect='auto', origin='lower')
fig.show()
"""
normalize_by_bin_width, kwargs = get_normalize_by_bin_width(workspace, axes, **kwargs)
transpose = kwargs.pop('transpose', False)
extent = kwargs.pop('extent', None)
interpolation = kwargs.pop('interpolation', None)
origin = kwargs.pop('origin', None)
norm = kwargs.pop('norm', None)
resample = kwargs.pop('resample', False)
kwargs.pop('distribution', None)
if not extent:
x0, x1, y0, y1 = (workspace.getDimension(0).getMinimum(),
workspace.getDimension(0).getMaximum(),
workspace.getDimension(1).getMinimum(),
workspace.getDimension(1).getMaximum())
if isinstance(workspace, MatrixWorkspace) and not workspace.isCommonBins():
# for MatrixWorkspace the x extent obtained from dimension 0 corresponds to the first spectrum
# this is not correct in case of ragged workspaces, where we need to obtain the global xmin and xmax
# moreover the axis might be in ascending or descending order, so x[0] is not necessarily the minimum
xmax, xmin = None, None # don't initialise with values from first spectrum as could be a monitor
si = workspace.spectrumInfo()
for i in range(workspace.getNumberHistograms()):
if si.hasDetectors(i) and not si.isMonitor(i):
x_axis = workspace.readX(i)
x_i_first = x_axis[0]
x_i_last = x_axis[-1]
x_i_min = min([x_i_first, x_i_last])
x_i_max = max([x_i_first, x_i_last])
# effectively ignore spectra with nan or inf values
if np.isfinite(x_i_min):
xmin = min([x_i_min, xmin]) if xmin else x_i_min
if np.isfinite(x_i_max):
xmax = max([x_i_max, xmax]) if xmax else x_i_max
x0 = xmin if xmin else x0
x1 = xmax if xmax else x1
if workspace.getDimension(1).getNBins() == workspace.getAxis(1).length():
width = workspace.getDimension(1).getBinWidth()
y0 -= width / 2
y1 += width / 2
if origin == "upper":
y0, y1 = y1, y0
extent = (x0, x1, y0, y1)
if transpose:
e1, e2, e3, e4 = extent
extent = e3, e4, e1, e2
# from matplotlib.axes.Axes.imshow
if norm is not None and not isinstance(norm, matplotlib.colors.Normalize):
raise ValueError("'norm' must be an instance of 'mcolors.Normalize'")
aspect = kwargs.pop('aspect', matplotlib.rcParams['image.aspect'])
axes.set_aspect(aspect)
im = SamplingImage(axes,
workspace,
transpose,
cmap,
norm,
interpolation,
origin,
extent,
filternorm=filternorm,
filterrad=filterrad,
resample=resample,
normalize_by_bin_width=normalize_by_bin_width,
**kwargs)
im._resample_image(100, 100)
im.set_alpha(alpha)
im.set_url(url)
if im.get_clip_path() is None:
# image does not already have clipping set, clip to axes patch
im.set_clip_path(axes.patch)
if vmin is not None or vmax is not None:
if norm is not None and isinstance(norm, matplotlib.colors.LogNorm):
if vmin <= 0:
vmin = 0.0001
if vmax <= 0:
vmax = 1
im.set_clim(vmin, vmax)
else:
im.autoscale_None()
# update ax.dataLim, and, if autoscaling, set viewLim
# to tightly fit the image, regardless of dataLim.
im.set_extent(im.get_extent())
axes.add_image(im)
if extent:
axes.set_xlim(extent[0], extent[1])
axes.set_ylim(extent[2], extent[3])
im.connect_events()
return im
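# --- Illustrative sketch (not part of the original module): wiring an
# existing Mantid MatrixWorkspace into imshow_sampling. The workspace is
# assumed to be supplied by the caller; nothing here creates one.
def _sampling_image_sketch(workspace):
    import matplotlib.pyplot as plt  # local import to keep the sketch self-contained
    fig, ax = plt.subplots()
    im = imshow_sampling(ax, workspace, aspect='auto', origin='lower')
    fig.canvas.draw()  # the draw() override triggers the first resample
    return im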
| gpl-3.0 |
kit-cel/wt | nt2/modulation_pulsformung/Spektren_digitale_Modulation.py | 1 | 2940 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 13 10:31:13 2014
NTII demo - source coding - effect on the spectrum of the transmitted signal
System model: source --> QPSK --> pulse shaping
@author: Michael Schwall
"""
from __future__ import division
import numpy as np
import matplotlib.pylab as plt
import scipy.signal as sig
import rrc as rrc
plt.close("all")
###############################################################################
## System parameters
###############################################################################
# Number of simulated symbols
K = 65536
# Probability of a 1-bit
P_b_1 = np.array([0.5,0.1])
# Oversampling (samples per symbol)
N = 4
# RRC filter: rolloff factor, number of filter coefficients
alpha = 0
N_rrc = N*16+1
# FFT Length
N_FFT = 1024
# Check inputs
assert (K > 0 and (K & (K - 1)) == 0), 'K must be a power of 2'
assert (N > 0 and N%2 == 0), 'N must be greater than zero and even'
assert (alpha >= 0 and alpha <= 1), 'The rolloff factor must satisfy: 0 <= alpha <= 1'
assert (N_rrc > 0 and N_rrc%2 != 0), 'N_rrc must be greater than zero and odd'
###############################################################################
## Transmitter
###############################################################################
idx=0
s_tx_rrc = np.zeros((K*N,len(P_b_1)))
while idx < len(P_b_1):
    # Generate bits
b = (P_b_1[idx]+np.random.uniform(-1.0,0.0,size=K) >= 0).astype(int)
    # Generate BPSK symbols
I = (2*b-1)
print "P(b=1)=%0.2f --> E{I} = %0.2f --> Var{I} = %0.2f" % (P_b_1[idx], I.mean(), I.var())
    # Upsample by factor N
s_up = np.zeros(K*N)
s_up[::N] = I;
# Root-Raised-Cosine (RRC) Filter
h_rrc = rrc.get_rrc_ir(N_rrc,N,1.0,alpha)
s_tx_rrc[:,idx] = sig.lfilter(h_rrc,1.0,s_up)
idx += 1
##############################################################################
# Output
##############################################################################
# Settling time of the RRC filter (auxiliary quantity)
N_osc = (N_rrc-1)/2
fig1 = plt.figure()
fig1.suptitle("Pulsformung (RRC, alpha=%0.2f)" % alpha, fontsize=14, fontweight='bold')
ax1 = fig1.add_subplot(1,2,1)
ax1.set_title('RRC impulse response')
ax1.stem(np.array(np.arange(-N_osc,N_osc+1)),h_rrc)
ax1.set_xlim(-N_osc,N_osc+1)
ax1.grid(True)
ax1.set_xlabel('k (t/Ts/N)')
ax1.set_ylabel('Amplitude')
ax2 = fig1.add_subplot(1,2,2)
ax2.set_title('PSD of QPSK with RRC pulse shaping')
idx=0
while idx < len(P_b_1):
Pxx_rrc = 1/N_FFT*(np.abs(np.fft.fftshift(np.fft.fft(np.reshape(s_tx_rrc[:,idx],(-1,N_FFT)),axis=1)))**2).sum(0)
f = np.linspace(-0.5,0.5,len(Pxx_rrc))
ax2.plot(f, 10*np.log10(Pxx_rrc))
idx += 1
start, end = ax2.get_ylim()
ax2.yaxis.set_ticks(np.arange(start, end, 10))
ax2.set_xlim(-0.5,0.5)
ax2.grid(True)
ax2.set_xlabel('n (f/N/Ts)')
ax2.set_ylabel('Amplitude [dB]')
plt.show()
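# --- Illustrative sketch (added, not part of the original script): the
# block-averaged periodogram computed above, wrapped as a helper. The name
# psd_blocks is hypothetical; len(signal) must be a multiple of n_fft.
def psd_blocks(signal, n_fft=1024):
    segs = np.reshape(signal, (-1, n_fft))                      # non-overlapping blocks
    spec = np.fft.fftshift(np.fft.fft(segs, axis=1), axes=1)    # FFT of each block
    return (np.abs(spec) ** 2).sum(0) / n_fft                   # averaged periodogram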
| gpl-2.0 |
zardav/FaceDetection | FaceDetection/temp.py | 1 | 3849 | import numpy as np
from scipy import ndimage, misc
from matplotlib import pyplot as plt
import glob
from MyViola import MyViolaClassifier
from Svm import Svm
import funcs
def find_face(img, shape, mv):
res_i = (0, 0)
res_j = (0, 0)
res_scl = 1
max_ = 0
scales = np.arange(.2, .35, .025)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
if val > max_:
max_ = val
res_i, res_j = i, j
res_scl = scl
return (int(res_i[0] / res_scl), int(res_i[1] / res_scl)), (int(res_j[0] / res_scl), int(res_j[1] / res_scl))
def get_sub_pics_with_size(imgs, shape):
scales = np.arange(.2, 1, .2)
m, n = shape
for img in imgs:
while img.shape[0] > 800:
img = misc.imresize(img, 0.5)
for scl in scales:
img_ = misc.imresize(img, scl)
x, y = img_.shape[:2]
if x < m or y < n:
continue
i = 0
while i + m < x:
j = 0
while j + n < y:
yield img_[i:i+m, j:j+n]
j += n
i += m
def temp():
files = glob.glob('../../faces/cropped/*.jpg')
faces = (misc.imread(im) for im in files)
mv = MyViolaClassifier()
mv.add_examples(faces, 1)
files = glob.glob('../../faces/nofaces/*.jpg')
nofaces = (misc.imread(im) for im in files)
mv.add_examples(get_sub_pics_with_size(nofaces, (137, 100)), -1)
mv.learn()
mv.save('my_viola.pkl')
files = glob.glob('../../faces/*.jpg')
for f in files:
img = misc.imread(f)
new_path = f.replace('/faces\\', '/faces\\new1\\')
i, j = find_face(img, (137, 100), mv)
i1, i2 = i
j1, j2 = j
new_img = img[i1:i2, j1:j2]
try:
misc.imsave(new_path, new_img)
except ValueError:
pass
def plot_image_faces(img, shape, mv):
plot_im_with_rects(img, get_all_faces_rects(img, shape, mv))
def plot_im_with_rects(img, rect_list):
img1 = img
for rect in rect_list:
img1 = funcs.implusrect(img1, rect[0], rect[1], (0, 255, 0))
plt.imshow(img1)
def get_all_faces_rects(img, shape, mv):
return [a[0] for a in filter_overlap_windows(get_all_windows(img, shape, mv))]
def get_all_windows(img, shape, mv):
scales = np.arange(.2, .35, .02)
m, n = shape
for scl in scales:
img_ = misc.imresize(img, scl)
mv.change_image(img_)
x, y = img_.shape[:2]
if x < m or y < n:
continue
for i, j in funcs.iter_shape((x, y), shape, 4):
val = mv.valuefy((i, j))
if val > 0:
res_i = (int(i[0] / scl), int(i[1] / scl))
res_j = (int(j[0] / scl), int(j[1] / scl))
yield ((res_i, res_j), val)
def is_pos_in_rect(pos, rect):
x, y = pos
(i1, i2), (j1, j2) = rect
return i1 <= x <= i2 and j1 <= y <= j2
def mid_point(rect):
(i1, i2), (j1, j2) = rect
return int((i1 + i2) / 2), int((j1 + j2) / 2)
def are_overlap(window1, window2):
return is_pos_in_rect(mid_point(window1), window2) or is_pos_in_rect(mid_point(window2), window1)
def filter_overlap_windows(windows):
maxs = []
for w in windows:
w_waiting = True
index = 0
while index < len(maxs) and w_waiting:
if are_overlap(w[0], maxs[index][0]):
if w[1] > maxs[index][1]:
maxs[index] = w
w_waiting = False
index += 1
if w_waiting:
maxs.append(w)
return maxs
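# --- Illustrative sketch (not part of the original module): how the greedy
# overlap filter above behaves on hand-made windows. Each entry is
# ((i1, i2), (j1, j2)) plus a score; the numbers are arbitrary.
def _overlap_filter_example():
    w1 = (((10, 40), (10, 40)), 0.9)       # strong detection
    w2 = (((12, 42), (12, 42)), 0.4)       # weaker, overlapping detection
    w3 = (((100, 130), (100, 130)), 0.6)   # separate detection
    return filter_overlap_windows([w1, w2, w3])   # keeps w1 and w3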
| gpl-3.0 |
xuerenlv/PaperWork | my_study/pandas_study/test1.py | 1 | 1044 | # -*- coding: utf-8 -*-
'''
Created on Oct 16, 2015
@author: nlp
'''
import sys
import traceback
from store_model import Single_weibo_store
import datetime
from datetime import timedelta
import pprint
import jieba
reload(sys)
sys.setdefaultencoding('utf8')
from sklearn import svm
X = [[0, 0], [1, 1]]
y = [0, 1]
clf = svm.SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0, degree=3,
gamma=0.0, kernel='rbf', max_iter=-1, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
clf.fit(X, y)
def is_all_chinese(word):
for uchar in word:
if uchar >= u'\u4e00' and uchar<=u'\u9fa5':
pass
else:
return False
return True
def is_all_alpha(word):
for one in word:
if (one >= 'a' and one <= u'z') or (one >= 'A' and one <= u'Z'):
pass
else:
return False
return True
if __name__ == '__main__':
x = '[timedelta'
y = u'薛薛啊结构'
print is_all_chinese(x)
print is_all_chinese(y)
pass
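# --- Illustrative sketch (added, not part of the original file): the SVC
# fitted above can be queried with predict; the query points are arbitrary
# and the expected labels are only indicative.
def _svm_predict_example():
    return clf.predict([[2., 2.], [-1., -1.]])   # typically array([1, 0])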
| apache-2.0 |
mwaskom/seaborn | doc/tools/generate_logos.py | 2 | 6982 | import numpy as np
import seaborn as sns
from matplotlib import patches
import matplotlib.pyplot as plt
from scipy.signal import gaussian
from scipy.spatial import distance
XY_CACHE = {}
STATIC_DIR = "_static"
plt.rcParams["savefig.dpi"] = 300
def poisson_disc_sample(array_radius, pad_radius, candidates=100, d=2, seed=None):
"""Find positions using poisson-disc sampling."""
# See http://bost.ocks.org/mike/algorithms/
rng = np.random.default_rng(seed)
uniform = rng.uniform
randint = rng.integers
# Cache the results
key = array_radius, pad_radius, seed
if key in XY_CACHE:
return XY_CACHE[key]
# Start at a fixed point we know will work
start = np.zeros(d)
samples = [start]
queue = [start]
while queue:
# Pick a sample to expand from
s_idx = randint(len(queue))
s = queue[s_idx]
for i in range(candidates):
# Generate a candidate from this sample
coords = uniform(s - 2 * pad_radius, s + 2 * pad_radius, d)
# Check the three conditions to accept the candidate
in_array = np.sqrt(np.sum(coords ** 2)) < array_radius
in_ring = np.all(distance.cdist(samples, [coords]) > pad_radius)
if in_array and in_ring:
# Accept the candidate
samples.append(coords)
queue.append(coords)
break
if (i + 1) == candidates:
# We've exhausted the particular sample
queue.pop(s_idx)
samples = np.array(samples)
XY_CACHE[key] = samples
return samples
def logo(
ax,
color_kws, ring, ring_idx, edge,
pdf_means, pdf_sigma, dy, y0, w, h,
hist_mean, hist_sigma, hist_y0, lw, skip,
scatter, pad, scale,
):
# Square, invisible axes with specified limits to center the logo
ax.set(xlim=(35 + w, 95 - w), ylim=(-3, 53))
ax.set_axis_off()
ax.set_aspect('equal')
# Magic numbers for the logo circle
radius = 27
center = 65, 25
# Full x and y grids for a gaussian curve
x = np.arange(101)
y = gaussian(x.size, pdf_sigma)
x0 = 30 # Magic number
xx = x[x0:]
# Vertical distances between the PDF curves
n = len(pdf_means)
dys = np.linspace(0, (n - 1) * dy, n) - (n * dy / 2)
dys -= dys.mean()
# Compute the PDF curves with vertical offsets
pdfs = [h * (y[x0 - m:-m] + y0 + dy) for m, dy in zip(pdf_means, dys)]
# Add in constants to fill from bottom and to top
pdfs.insert(0, np.full(xx.shape, -h))
pdfs.append(np.full(xx.shape, 50 + h))
# Color gradient
colors = sns.cubehelix_palette(n + 1 + bool(hist_mean), **color_kws)
# White fill between curves and around edges
bg = patches.Circle(
center, radius=radius - 1 + ring, color="white",
transform=ax.transData, zorder=0,
)
ax.add_artist(bg)
# Clipping artist (not shown) for the interior elements
fg = patches.Circle(center, radius=radius - edge, transform=ax.transData)
# Ring artist to surround the circle (optional)
if ring:
wedge = patches.Wedge(
center, r=radius + edge / 2, theta1=0, theta2=360, width=edge / 2,
transform=ax.transData, color=colors[ring_idx], alpha=1
)
ax.add_artist(wedge)
# Add histogram bars
if hist_mean:
hist_color = colors.pop(0)
hist_y = gaussian(x.size, hist_sigma)
hist = 1.1 * h * (hist_y[x0 - hist_mean:-hist_mean] + hist_y0)
dx = x[skip] - x[0]
hist_x = xx[::skip]
hist_h = h + hist[::skip]
# Magic number to avoid tiny sliver of bar on edge
use = hist_x < center[0] + radius * .5
bars = ax.bar(
hist_x[use], hist_h[use], bottom=-h, width=dx,
align="edge", color=hist_color, ec="w", lw=lw,
zorder=3,
)
for bar in bars:
bar.set_clip_path(fg)
# Add each smooth PDF "wave"
for i, pdf in enumerate(pdfs[1:], 1):
u = ax.fill_between(xx, pdfs[i - 1] + w, pdf, color=colors[i - 1], lw=0)
u.set_clip_path(fg)
# Add scatterplot in top wave area
if scatter:
seed = sum(map(ord, "seaborn logo"))
xy = poisson_disc_sample(radius - edge - ring, pad, seed=seed)
clearance = distance.cdist(xy + center, np.c_[xx, pdfs[-2]])
use = clearance.min(axis=1) > pad / 1.8
x, y = xy[use].T
sizes = (x - y) % 9
points = ax.scatter(
x + center[0], y + center[1], s=scale * (10 + sizes * 5),
zorder=5, color=colors[-1], ec="w", lw=scale / 2,
)
path = u.get_paths()[0]
points.set_clip_path(path, transform=u.get_transform())
u.set_visible(False)
def savefig(fig, shape, variant):
fig.subplots_adjust(0, 0, 1, 1, 0, 0)
facecolor = (1, 1, 1, 1) if bg == "white" else (1, 1, 1, 0)
for ext in ["png", "svg"]:
fig.savefig(f"{STATIC_DIR}/logo-{shape}-{variant}bg.{ext}", facecolor=facecolor)
if __name__ == "__main__":
for bg in ["white", "light", "dark"]:
color_idx = -1 if bg == "dark" else 0
kwargs = dict(
color_kws=dict(start=.3, rot=-.4, light=.8, dark=.3, reverse=True),
ring=True, ring_idx=color_idx, edge=1,
pdf_means=[8, 24], pdf_sigma=16,
dy=1, y0=1.8, w=.5, h=12,
hist_mean=2, hist_sigma=10, hist_y0=.6, lw=1, skip=6,
scatter=True, pad=1.8, scale=.5,
)
color = sns.cubehelix_palette(**kwargs["color_kws"])[color_idx]
# ------------------------------------------------------------------------ #
fig, ax = plt.subplots(figsize=(2, 2), facecolor="w", dpi=100)
logo(ax, **kwargs)
savefig(fig, "mark", bg)
# ------------------------------------------------------------------------ #
fig, axs = plt.subplots(1, 2, figsize=(8, 2), dpi=100,
gridspec_kw=dict(width_ratios=[1, 3]))
logo(axs[0], **kwargs)
font = {
"family": "avenir",
"color": color,
"weight": "regular",
"size": 120,
}
axs[1].text(.01, .35, "seaborn", ha="left", va="center",
fontdict=font, transform=axs[1].transAxes)
axs[1].set_axis_off()
savefig(fig, "wide", bg)
# ------------------------------------------------------------------------ #
fig, axs = plt.subplots(2, 1, figsize=(2, 2.5), dpi=100,
gridspec_kw=dict(height_ratios=[4, 1]))
logo(axs[0], **kwargs)
font = {
"family": "avenir",
"color": color,
"weight": "regular",
"size": 34,
}
axs[1].text(.5, 1, "seaborn", ha="center", va="top",
fontdict=font, transform=axs[1].transAxes)
axs[1].set_axis_off()
savefig(fig, "tall", bg)
| bsd-3-clause |
omnirom/android_kernel_htc_flounder | scripts/tracing/dma-api/plotting.py | 96 | 4043 | """Ugly graph drawing tools"""
import matplotlib.pyplot as plt
import matplotlib.cm as cmap
#import numpy as np
from matplotlib import cbook
# http://stackoverflow.com/questions/4652439/is-there-a-matplotlib-equivalent-of-matlabs-datacursormode
class DataCursor(object):
"""A simple data cursor widget that displays the x,y location of a
matplotlib artist when it is selected."""
def __init__(self, artists, tolerance=5, offsets=(-20, 20),
template='x: %0.2f\ny: %0.2f', display_all=False):
"""Create the data cursor and connect it to the relevant figure.
"artists" is the matplotlib artist or sequence of artists that will be
selected.
"tolerance" is the radius (in points) that the mouse click must be
within to select the artist.
"offsets" is a tuple of (x,y) offsets in points from the selected
point to the displayed annotation box
"template" is the format string to be used. Note: For compatibility
with older versions of python, this uses the old-style (%)
formatting specification.
"display_all" controls whether more than one annotation box will
be shown if there are multiple axes. Only one will be shown
per-axis, regardless.
"""
self.template = template
self.offsets = offsets
self.display_all = display_all
if not cbook.iterable(artists):
artists = [artists]
self.artists = artists
self.axes = tuple(set(art.axes for art in self.artists))
self.figures = tuple(set(ax.figure for ax in self.axes))
self.annotations = {}
for ax in self.axes:
self.annotations[ax] = self.annotate(ax)
for artist in self.artists:
artist.set_picker(tolerance)
for fig in self.figures:
fig.canvas.mpl_connect('pick_event', self)
def annotate(self, ax):
"""Draws and hides the annotation box for the given axis "ax"."""
annotation = ax.annotate(self.template, xy=(0, 0), ha='right',
xytext=self.offsets, textcoords='offset points', va='bottom',
bbox=dict(boxstyle='round,pad=0.5', fc='yellow', alpha=0.5),
arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0')
)
annotation.set_visible(False)
return annotation
def __call__(self, event):
"""Intended to be called through "mpl_connect"."""
# Rather than trying to interpolate, just display the clicked coords
# This will only be called if it's within "tolerance", anyway.
x, y = event.mouseevent.xdata, event.mouseevent.ydata
try:
annotation = self.annotations[event.artist.axes]
except KeyError:
return
if x is not None:
if not self.display_all:
# Hide any other annotation boxes...
for ann in self.annotations.values():
ann.set_visible(False)
# Update the annotation in the current axis..
annotation.xy = x, y
annotation.set_text(self.template % (x, y))
annotation.set_visible(True)
event.canvas.draw()
def plotseries(*serieslabels):
"""Plot lists of series in separate axes, tie time axis together"""
global fig
fig, axes = plt.subplots(nrows=len(serieslabels), sharex=True)
for subplot, ax in zip(serieslabels, axes):
for ser, lab in zip(*subplot): # subplot = ([x], [y])
ax.step(ser[0], ser[1], label=lab, where="post")
ax.grid(True)
ax.legend()
(DataCursor(ax.lines))
plt.grid(True)
plt.show()
def disp_pic(bitmap):
"""Display the allocation bitmap. TODO."""
fig=plt.figure()
a=fig.add_subplot(1,1,1)
fig.clf()
implt=plt.imshow(bitmap, extent=(0, len(bitmap[0]), 0, len(bitmap)),
interpolation="nearest", cmap=cmap.gist_heat)
fig.canvas.draw()
plt.show()
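# --- Illustrative sketch (added for clarity, not part of the original
# module): attaching a DataCursor to a simple line plot, mirroring the
# stackoverflow recipe referenced above. The data are arbitrary.
def _datacursor_example():
    fig_e, ax_e = plt.subplots()
    lines = ax_e.plot(range(10), [x * x for x in range(10)], "o-")
    DataCursor(lines)   # click near a point to display its coordinates
    plt.show()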
| gpl-2.0 |
Didou09/tofu | tofu/mag/regression_test.py | 2 | 11158 | # -*- coding: utf-8 -*-
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
# Also if needed: retab
'''
Regression test
'''
from __future__ import (unicode_literals, absolute_import, \
print_function, division)
import argparse
import matplotlib.pyplot as plt
import numpy as np
import os
import subprocess
import sys
import time
#print('path 1 =', sys.path)
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
#print('path 2 =', sys.path)
# Local modules
import equimap
import imas
# REFERENCE FILE !!!
# ==================
REF_FILE = 'reference.npz'
REF_SHOT = 54178
REF_RUN = 9
REF_OCC = 0
REF_USER = 'imas_public'
REF_MACHINE = 'west'
# ==================
# Parameters
interp_points = 30
eps_time = 1.23456789E-2
lquantities = ('rho_pol_norm', 'rho_tor_norm', 'rho_tor', 'psi', 'phi', \
'theta', 'j_tor', 'b_field_r', 'b_field_z', 'b_field_tor', \
'b_field_norm')
def eval_diff(data, data_ref, name, rel_tolerance=1E-10):
'''
Function
--------
eval_diff(data, data_ref, name='data', rel_tolerance=1E-10)
Output
------
print the maximum and the maximum index of difference, displays
an error if the maximum is above the given relative tolerance
'''
data = np.asarray(data)
data_ref = np.asarray(data_ref)
if (data.shape != data_ref.shape):
raise ValueError('Shape of input data is not equal')
rel_diff = np.abs( (data - data_ref) / data_ref )
max_rel_diff = np.nanmax(rel_diff)
if (rel_diff.ndim != 0):
ind_max_rel_diff = np.unravel_index(np.nanargmax(rel_diff), rel_diff.shape)
else:
ind_max_rel_diff = 0
if (max_rel_diff > rel_tolerance):
raise ValueError('ERROR test in: ' + name + ', max relative difference = '
+ '{0} at index = {1}'.format(max_rel_diff, ind_max_rel_diff))
print('')
print('In field name: ' + name + ', max relative difference = '
+ '{0} at index = {1}'.format(max_rel_diff, ind_max_rel_diff))
print('')
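# Hedged usage sketch (added for illustration, not part of the original test):
# eval_diff() raises ValueError when the relative deviation exceeds the
# tolerance, so it can be exercised directly on any pair of arrays.
def _demo_eval_diff():
    a = np.array([1.0, 2.0, 3.0])
    b = a * (1.0 + 1e-12)
    eval_diff(b, a, 'demo', rel_tolerance=1e-10)  # passes: difference ~1e-12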
if __name__ == '__main__':
print(' ')
# Parse input arguments
parser = argparse.ArgumentParser(description= \
'''Run regression EQUIMAP test using REF_FILE = {0}; REF_SHOT = {1};
REF_RUN = {2}; REF_OCC = {3}; REF_USER = {4}; REF_MACHINE = {5}
'''.format(REF_FILE, REF_SHOT, REF_RUN, REF_OCC, REF_USER, REF_MACHINE))
# To exclude 2 conflict options use:
#group = parser.add_mutually_exclusive_group()
#parser.add_argument('shot', type=int, nargs='?', default=53259, help='shot, default=53259')
parser.add_argument('--saveFile', action='store_true', \
help='saves a Python .npz file')
parser.add_argument('--figures', action='store_true', \
help='plot figures')
parser.add_argument('--no-git-check', action='store_true', \
help='no check for changes that are not commited')
args = parser.parse_args()
print('REF FILE =', REF_FILE)
print(' ')
if (not args.no_git_check):
try:
subprocess.run(['git', 'diff', '--exit-code', '--quiet'], check=True)
subprocess.run(['git', 'diff', '--cached', '--exit-code', '--quiet'], check=True)
except subprocess.CalledProcessError as err:
print(' ')
print('ERROR: not commited changes, please commit the changes.', err)
print(' ')
raise
# Initialize dictionary to store results
results = {}
idd = imas.ids(REF_SHOT, REF_RUN)
idd.open_env(REF_USER, REF_MACHINE, '3')
if (REF_OCC == 0):
idd.equilibrium.get()
else:
idd.equilibrium.get(REF_OCC)
equi = idd.equilibrium
# Test one time and spatial 3D
# ----------------------------
time_in = eps_time + 0.5*(np.nanmax(equi.time) + np.nanmin(equi.time))
equi_grid = idd.equilibrium.grids_ggd[0].grid[0]
NbrPoints = len(equi_grid.space[0].objects_per_dimension[0].object)
equiDict = {}
equiDict['r'] = np.full(NbrPoints, np.nan)
equiDict['z'] = np.full(NbrPoints, np.nan)
for ii in range(NbrPoints):
equiDict['r'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[0]
equiDict['z'][ii] = equi_grid.space[0].objects_per_dimension[0]. \
object[ii].geometry[1]
R_in = np.linspace(np.min(equiDict['r']), \
np.max(equiDict['r']), interp_points)
Z_in = np.linspace(np.min(equiDict['z']), \
np.max(equiDict['z']), interp_points)
Phi_in = np.linspace(0, 2*np.pi/18, interp_points)
R_in_tot = np.tile(R_in, int(interp_points**2))
Z_in_tot = np.tile(np.repeat(Z_in, interp_points), interp_points)
Phi_in_tot = np.repeat(Phi_in, int(interp_points**2))
Rr = R_in_tot.reshape((interp_points, interp_points, interp_points))
Zr = Z_in_tot.reshape((interp_points, interp_points, interp_points))
for iquant in lquantities:
start = time.time()
#sys.stdout = open(os.devnull, 'w')
out = equimap.get(REF_SHOT, time=time_in, R=R_in_tot, Phi=Phi_in_tot, \
Z=Z_in_tot, quantity=iquant, no_ripple=False, \
run=REF_RUN, occ=REF_OCC, user=REF_USER, \
machine=REF_MACHINE)
#sys.stdout = sys.__stdout__
end = time.time()
print()
print('====================================')
print('time for', iquant, ' =', end - start)
print('====================================')
print()
if (args.figures):
outr = out.reshape((interp_points, interp_points, interp_points))
plt.figure()
plt.contourf(Rr[int(0.2*interp_points), :, :], \
Zr[int(0.2*interp_points), :, :], \
outr[int(0.2*interp_points), :, :])
arg_time = np.argmin(np.abs(equi.time - time_in))
plt.plot(np.squeeze(equi.time_slice[arg_time].boundary.outline.r), \
np.squeeze(equi.time_slice[arg_time].boundary.outline.z), \
linewidth=2, color='red')
plt.plot(equi.time_slice[arg_time].global_quantities.magnetic_axis.r, \
equi.time_slice[arg_time].global_quantities.magnetic_axis.z, \
marker='+', color='red', markersize=20)
plt.xlabel('R [m]')
plt.ylabel('Z [m]')
plt.title('{0} t={1:.2f}'.format(iquant, time_in))
plt.colorbar()
# Save results in dict
results[iquant] = out
# End loop on lquantities
# Test large time and spatial 2D (R, Phi)
# --------------------------------------
# Check code.output_flag for data validity
if (np.any(np.isnan(equi.code.output_flag))):
mask = np.full(len(equi.time), True, dtype=bool)
else:
mask = np.asarray(equi.code.output_flag) >= 0
time1 = 0.495*(np.nanmax(equi.time[mask]) + np.nanmin(equi.time[mask]))
time2 = 0.505*(np.nanmax(equi.time[mask]) + np.nanmin(equi.time[mask]))
mask_time_tmp = (equi.time[mask] >= time1) \
& (equi.time[mask] <= time2)
indMin = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][0]).argmin()
indMax = np.abs(equi.time[mask] \
- equi.time[mask][mask_time_tmp][-1]).argmin()
if (indMin == 0):
indMinApply = indMin
else:
indMinApply = indMin - 1
if (indMax == (equi.time[mask].size-1)):
indMaxApply = indMax
else:
indMaxApply = indMax + 1
mask_time = (equi.time[mask] >= equi.time[mask][indMinApply]) \
& (equi.time[mask] <= equi.time[mask][indMaxApply])
time_points = equi.time[mask][mask_time]
time_in = np.linspace(time1, time2, time_points.size + 1)
time_in += eps_time
R_in = np.linspace(np.min(equiDict['r']), \
np.max(equiDict['r']), interp_points)
Phi_in = np.linspace(0, 2*np.pi/18, interp_points)
R_in_tot = np.tile(R_in, interp_points)
Z_in_tot = np.zeros(R_in_tot.shape)
Phi_in_tot = np.repeat(Phi_in, interp_points)
Rr = R_in_tot.reshape((interp_points, interp_points))
Phir = Phi_in_tot.reshape((interp_points, interp_points))
arg_time = np.argmin(np.abs(equi.time - time_in[int(0.5*time_in.size)]))
if (args.figures):
mask_LFS = (equi.time_slice[arg_time].boundary.outline.r > equi.time_slice[arg_time].global_quantities.magnetic_axis.r)
indZ0_LFS = np.argmin(np.abs(equi.time_slice[arg_time].boundary.outline.z[mask_LFS]))
mask_HFS = (equi.time_slice[arg_time].boundary.outline.r < equi.time_slice[arg_time].global_quantities.magnetic_axis.r)
indZ0_HFS = np.argmin(np.abs(equi.time_slice[arg_time].boundary.outline.z[mask_HFS]))
for iquant in lquantities:
start = time.time()
#sys.stdout = open(os.devnull, 'w')
out = equimap.get(REF_SHOT, time=time_in, R=R_in_tot, Phi=Phi_in_tot, \
Z=Z_in_tot, quantity=iquant, no_ripple=False, \
run=REF_RUN, occ=REF_OCC, user=REF_USER, \
machine=REF_MACHINE)
#sys.stdout = sys.__stdout__
end = time.time()
print()
print('====================================')
print('time (large time input) for', iquant, ' =', end - start)
print('Z_axis =', equi.time_slice[arg_time].global_quantities.magnetic_axis.z)
print('====================================')
print()
if (args.figures):
outr = out[int(0.5*out.shape[0])].reshape((interp_points, interp_points))
plt.figure()
plt.contourf(Rr[:, :], Phir[:, :], outr[:, :])
plt.axvline(np.squeeze(equi.time_slice[arg_time].boundary.outline.r[mask_LFS][indZ0_LFS]), \
linewidth=2, color='red')
plt.axvline(np.squeeze(equi.time_slice[arg_time].boundary.outline.r[mask_HFS][indZ0_HFS]), \
linewidth=2, color='red')
plt.axvline(equi.time_slice[arg_time].global_quantities.magnetic_axis.r, \
linewidth=2, color='red', linestyle='--')
plt.xlabel('R [m]')
plt.ylabel('Phi [rad]')
plt.title('{0} t={1:.2f}'.format(iquant, time_in[int(0.5*out.shape[0])]))
plt.colorbar()
# Save results in dict
results[iquant + '_LT'] = out
# End loop on lquantities
if (args.saveFile):
filename = 'reg_test_{0}_Run{1}_Occ{2}_User_{3}_Machine_{4}.npz'.format( \
REF_SHOT, REF_RUN, REF_OCC, REF_USER, REF_MACHINE)
np.savez(filename, **results)
if (args.figures):
plt.show()
ref = np.load(REF_FILE)
for iquant in lquantities:
eval_diff(results[iquant], ref[iquant], iquant)
eval_diff(results[iquant + '_LT'], ref[iquant + '_LT'], iquant + '_LT')
print()
print('End regression test')
print()
| mit |
lancezlin/ml_template_py | lib/python2.7/site-packages/sklearn/tests/test_kernel_approximation.py | 78 | 7586 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn.utils.testing import assert_array_equal, assert_equal, assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_array_almost_equal, assert_raises
from sklearn.utils.testing import assert_less_equal
from sklearn.metrics.pairwise import kernel_metrics
from sklearn.kernel_approximation import RBFSampler
from sklearn.kernel_approximation import AdditiveChi2Sampler
from sklearn.kernel_approximation import SkewedChi2Sampler
from sklearn.kernel_approximation import Nystroem
from sklearn.metrics.pairwise import polynomial_kernel, rbf_kernel
# generate data
rng = np.random.RandomState(0)
X = rng.random_sample(size=(300, 50))
Y = rng.random_sample(size=(300, 50))
X /= X.sum(axis=1)[:, np.newaxis]
Y /= Y.sum(axis=1)[:, np.newaxis]
def test_additive_chi2_sampler():
# test that AdditiveChi2Sampler approximates kernel on random data
# compute exact kernel
# abbreviations for easier formula
X_ = X[:, np.newaxis, :]
Y_ = Y[np.newaxis, :, :]
large_kernel = 2 * X_ * Y_ / (X_ + Y_)
# reduce to n_samples_x x n_samples_y by summing over features
kernel = (large_kernel.sum(axis=2))
# approximate kernel mapping
transform = AdditiveChi2Sampler(sample_steps=3)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
X_sp_trans = transform.fit_transform(csr_matrix(X))
Y_sp_trans = transform.transform(csr_matrix(Y))
assert_array_equal(X_trans, X_sp_trans.A)
assert_array_equal(Y_trans, Y_sp_trans.A)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
# test error on invalid sample_steps
transform = AdditiveChi2Sampler(sample_steps=4)
assert_raises(ValueError, transform.fit, X)
# test that the sample interval is set correctly
sample_steps_available = [1, 2, 3]
for sample_steps in sample_steps_available:
# test that the sample_interval is initialized correctly
transform = AdditiveChi2Sampler(sample_steps=sample_steps)
assert_equal(transform.sample_interval, None)
# test that the sample_interval is changed in the fit method
transform.fit(X)
assert_not_equal(transform.sample_interval_, None)
# test that the sample_interval is set correctly
sample_interval = 0.3
transform = AdditiveChi2Sampler(sample_steps=4,
sample_interval=sample_interval)
assert_equal(transform.sample_interval, sample_interval)
transform.fit(X)
assert_equal(transform.sample_interval_, sample_interval)
def test_skewed_chi2_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
c = 0.03
# abbreviations for easier formula
X_c = (X + c)[:, np.newaxis, :]
Y_c = (Y + c)[np.newaxis, :, :]
# we do it in log-space in the hope that it's more stable
# this array is n_samples_x x n_samples_y big x n_features
log_kernel = ((np.log(X_c) / 2.) + (np.log(Y_c) / 2.) + np.log(2.) -
np.log(X_c + Y_c))
# reduce to n_samples_x x n_samples_y by summing over features in log-space
kernel = np.exp(log_kernel.sum(axis=2))
# approximate kernel mapping
transform = SkewedChi2Sampler(skewedness=c, n_components=1000,
random_state=42)
X_trans = transform.fit_transform(X)
Y_trans = transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
assert_array_almost_equal(kernel, kernel_approx, 1)
# test error is raised on negative input
Y_neg = Y.copy()
Y_neg[0, 0] = -1
assert_raises(ValueError, transform.transform, Y_neg)
def test_rbf_sampler():
# test that RBFSampler approximates kernel on random data
# compute exact kernel
gamma = 10.
kernel = rbf_kernel(X, Y, gamma=gamma)
# approximate kernel mapping
rbf_transform = RBFSampler(gamma=gamma, n_components=1000, random_state=42)
X_trans = rbf_transform.fit_transform(X)
Y_trans = rbf_transform.transform(Y)
kernel_approx = np.dot(X_trans, Y_trans.T)
error = kernel - kernel_approx
assert_less_equal(np.abs(np.mean(error)), 0.01) # close to unbiased
np.abs(error, out=error)
assert_less_equal(np.max(error), 0.1) # nothing too far off
assert_less_equal(np.mean(error), 0.05) # mean is fairly close
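# Hedged illustration (not part of the scikit-learn test suite): the random
# Fourier feature map that RBFSampler approximates can be written out by hand.
# The construction below follows Rahimi & Recht's random Fourier features and
# is a sketch, not the exact code used inside scikit-learn.
def _manual_rbf_features(X, gamma, n_components, seed=0):
    rs = np.random.RandomState(seed)
    # For k(x, y) = exp(-gamma ||x - y||^2) the frequencies are Gaussian with
    # standard deviation sqrt(2 * gamma).
    W = np.sqrt(2 * gamma) * rs.normal(size=(X.shape[1], n_components))
    b = rs.uniform(0, 2 * np.pi, size=n_components)
    Z = np.sqrt(2.0 / n_components) * np.cos(np.dot(X, W) + b)
    return Z  # np.dot(Z_x, Z_y.T) approximates rbf_kernel(X, Y, gamma=gamma)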
def test_input_validation():
# Regression test: kernel approx. transformers should work on lists
# No assertions; the old versions would simply crash
X = [[1, 2], [3, 4], [5, 6]]
AdditiveChi2Sampler().fit(X).transform(X)
SkewedChi2Sampler().fit(X).transform(X)
RBFSampler().fit(X).transform(X)
X = csr_matrix(X)
RBFSampler().fit(X).transform(X)
def test_nystroem_approximation():
# some basic tests
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 4))
# With n_components = n_samples this is exact
X_transformed = Nystroem(n_components=X.shape[0]).fit_transform(X)
K = rbf_kernel(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
trans = Nystroem(n_components=2, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test callable kernel
linear_kernel = lambda X, Y: np.dot(X, Y.T)
trans = Nystroem(n_components=2, kernel=linear_kernel, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
# test that available kernels fit and transform
kernels_available = kernel_metrics()
for kern in kernels_available:
trans = Nystroem(n_components=2, kernel=kern, random_state=rnd)
X_transformed = trans.fit(X).transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 2))
def test_nystroem_singular_kernel():
# test that nystroem works with singular kernel matrix
rng = np.random.RandomState(0)
X = rng.rand(10, 20)
X = np.vstack([X] * 2) # duplicate samples
gamma = 100
N = Nystroem(gamma=gamma, n_components=X.shape[0]).fit(X)
X_transformed = N.transform(X)
K = rbf_kernel(X, gamma=gamma)
assert_array_almost_equal(K, np.dot(X_transformed, X_transformed.T))
    assert_true(np.all(np.isfinite(X_transformed)))
def test_nystroem_poly_kernel_params():
# Non-regression: Nystroem should pass other parameters beside gamma.
rnd = np.random.RandomState(37)
X = rnd.uniform(size=(10, 4))
K = polynomial_kernel(X, degree=3.1, coef0=.1)
nystroem = Nystroem(kernel="polynomial", n_components=X.shape[0],
degree=3.1, coef0=.1)
X_transformed = nystroem.fit_transform(X)
assert_array_almost_equal(np.dot(X_transformed, X_transformed.T), K)
def test_nystroem_callable():
# Test Nystroem on a callable.
rnd = np.random.RandomState(42)
n_samples = 10
X = rnd.uniform(size=(n_samples, 4))
def logging_histogram_kernel(x, y, log):
"""Histogram kernel that writes to a log."""
log.append(1)
return np.minimum(x, y).sum()
kernel_log = []
X = list(X) # test input validation
Nystroem(kernel=logging_histogram_kernel,
n_components=(n_samples - 1),
kernel_params={'log': kernel_log}).fit(X)
assert_equal(len(kernel_log), n_samples * (n_samples - 1) / 2)
| mit |
Alexsaphir/TP_EDP_Python | TP2_error.py | 1 | 1912 | # -*- coding: utf-8 -*-
from numpy import * # importation du module numpy
from numpy.linalg import * # importation du module numpy.linalg
from numpy.random import *
from matplotlib.pyplot import *
from mpl_toolkits.mplot3d import Axes3D
# Compute the error while varying Ns
def Ud(x):
y = sin(2*pi*x)*sinh(2*pi)
return y
# Function defining the exact solution of the equation
def solex(x, y):
z = sin(2*pi*x)*sinh(2*pi*y)
return z
def solver(Ns):
    # Mesh
h = 1./(Ns + 1)
X = linspace(0,1,Ns+2)
Xh = X[1:Ns+1]
    # Matrix of the linear system
A = -1*(diag(ones(Ns*Ns-3),3) + diag(ones(Ns*Ns-3),-3))
B = 4*eye(Ns) -1*(diag(ones(Ns-1),1) + diag(ones(Ns-1),-1))
for i in arange(0,Ns):
A[Ns*i:Ns*i+Ns,Ns*i:Ns*i+Ns] = B
    # Right-hand side
b = zeros(Ns*Ns)
b[Ns*(Ns-1):Ns*Ns] = Ud(Xh)
    # Solve the linear system A Uh = b
    Uh = solve(A, b)
    # Reshape the solution into the matrix Zh
Zh = array( 0*Ud(X))
for i in arange (0, Ns, 1):
newrow = Uh[ i*(Ns):i*Ns+Ns]
newrow =concatenate([[0], newrow, [0]])
Zh = vstack([newrow, Zh])
Zh = vstack([Ud(X), Zh])
    # Build the mesh grid
coordX, coordY= np.meshgrid(X, flip(X,0))
    # Exact solution on the mesh
U = solex(coordX,coordY)
    # Compute the error
Err = amax(absolute(U-Zh))
#fig = figure()
#ax = Axes3D(fig, azim = 30, elev = 30)
#ax.plot_surface(coordX, coordY, Zh, cmap = cm.jet)
#ax.plot_surface(coordX, coordY, U, cmap = cm.jet)
#fig.show()
return Err
def Err_Conv(N):
E=zeros(N-3)
for i in arange(3, N,1):
E[i-3]=solver(i)
    plot(linspace(3,N-1,N-3),E,label='Error')
    xlabel('Number of points used (log)')
    ylabel('Maximum measured error')
    title('2D Laplace equation: convergence study')
xscale('log')
savefig('Picture/TP2/Erreur.png')
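# Hedged example (added for illustration; not in the original exercise file):
# the module defines solver() and Err_Conv() but never calls them. A minimal
# run could look like this. Err_Conv() additionally expects the folder
# 'Picture/TP2' to exist for its savefig() call.
if __name__ == '__main__':
    print('Max error on a 10-point grid:', solver(10))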
| lgpl-3.0 |
vortex-ape/scikit-learn | examples/model_selection/plot_nested_cross_validation_iris.py | 9 | 4415 | """
=========================================
Nested versus non-nested cross-validation
=========================================
This example compares non-nested and nested cross-validation strategies on a
classifier of the iris data set. Nested cross-validation (CV) is often used to
train a model in which hyperparameters also need to be optimized. Nested CV
estimates the generalization error of the underlying model and its
(hyper)parameter search. Choosing the parameters that maximize non-nested CV
biases the model to the dataset, yielding an overly-optimistic score.
Model selection without nested CV uses the same data to tune model parameters
and evaluate model performance. Information may thus "leak" into the model
and overfit the data. The magnitude of this effect is primarily dependent on
the size of the dataset and the stability of the model. See Cawley and Talbot
[1]_ for an analysis of these issues.
To avoid this problem, nested CV effectively uses a series of
train/validation/test set splits. In the inner loop (here executed by
:class:`GridSearchCV <sklearn.model_selection.GridSearchCV>`), the score is
approximately maximized by fitting a model to each training set, and then
directly maximized in selecting (hyper)parameters over the validation set. In
the outer loop (here in :func:`cross_val_score
<sklearn.model_selection.cross_val_score>`), generalization error is estimated
by averaging test set scores over several dataset splits.
The example below uses a support vector classifier with a non-linear kernel to
build a model with optimized hyperparameters by grid search. We compare the
performance of non-nested and nested CV strategies by taking the difference
between their scores.
.. topic:: See Also:
- :ref:`cross_validation`
- :ref:`grid_search`
.. topic:: References:
.. [1] `Cawley, G.C.; Talbot, N.L.C. On over-fitting in model selection and
subsequent selection bias in performance evaluation.
J. Mach. Learn. Res 2010,11, 2079-2107.
<http://jmlr.csail.mit.edu/papers/volume11/cawley10a/cawley10a.pdf>`_
"""
from sklearn.datasets import load_iris
from matplotlib import pyplot as plt
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV, cross_val_score, KFold
import numpy as np
print(__doc__)
# Number of random trials
NUM_TRIALS = 30
# Load the dataset
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
# Set up possible values of parameters to optimize over
p_grid = {"C": [1, 10, 100],
"gamma": [.01, .1]}
# We will use a Support Vector Classifier with "rbf" kernel
svm = SVC(kernel="rbf")
# Arrays to store scores
non_nested_scores = np.zeros(NUM_TRIALS)
nested_scores = np.zeros(NUM_TRIALS)
# Loop for each trial
for i in range(NUM_TRIALS):
# Choose cross-validation techniques for the inner and outer loops,
# independently of the dataset.
# E.g "GroupKFold", "LeaveOneOut", "LeaveOneGroupOut", etc.
inner_cv = KFold(n_splits=4, shuffle=True, random_state=i)
outer_cv = KFold(n_splits=4, shuffle=True, random_state=i)
# Non_nested parameter search and scoring
clf = GridSearchCV(estimator=svm, param_grid=p_grid, cv=inner_cv)
clf.fit(X_iris, y_iris)
non_nested_scores[i] = clf.best_score_
# Nested CV with parameter optimization
nested_score = cross_val_score(clf, X=X_iris, y=y_iris, cv=outer_cv)
nested_scores[i] = nested_score.mean()
score_difference = non_nested_scores - nested_scores
print("Average difference of {0:6f} with std. dev. of {1:6f}."
.format(score_difference.mean(), score_difference.std()))
# Plot scores on each trial for nested and non-nested CV
plt.figure()
plt.subplot(211)
non_nested_scores_line, = plt.plot(non_nested_scores, color='r')
nested_line, = plt.plot(nested_scores, color='b')
plt.ylabel("score", fontsize="14")
plt.legend([non_nested_scores_line, nested_line],
["Non-Nested CV", "Nested CV"],
bbox_to_anchor=(0, .4, .5, 0))
plt.title("Non-Nested and Nested Cross Validation on Iris Dataset",
x=.5, y=1.1, fontsize="15")
# Plot bar chart of the difference.
plt.subplot(212)
difference_plot = plt.bar(range(NUM_TRIALS), score_difference)
plt.xlabel("Individual Trial #")
plt.legend([difference_plot],
["Non-Nested CV - Nested CV Score"],
bbox_to_anchor=(0, 1, .8, 0))
plt.ylabel("score difference", fontsize="14")
plt.show()
| bsd-3-clause |
abergeron/pylearn2 | pylearn2/utils/image.py | 39 | 18841 | """
Utility functions for working with images.
"""
import logging
import numpy as np
plt = None
axes = None
from theano.compat.six.moves import xrange
from theano.compat.six import string_types
import warnings
try:
import matplotlib.pyplot as plt
import matplotlib.axes
except (RuntimeError, ImportError, TypeError) as matplotlib_exception:
warnings.warn("Unable to import matplotlib. Some features unavailable. "
"Original exception: " + str(matplotlib_exception))
import os
try:
from PIL import Image
except ImportError:
Image = None
from pylearn2.utils import string_utils as string
from pylearn2.utils.exc import reraise_as
from tempfile import mkstemp
from multiprocessing import Process
import subprocess
logger = logging.getLogger(__name__)
def ensure_Image():
"""Makes sure Image has been imported from PIL"""
global Image
if Image is None:
raise RuntimeError("You are trying to use PIL-dependent functionality"
" but don't have PIL installed.")
def imview(*args, **kwargs):
"""
A matplotlib-based image viewer command,
wrapping `matplotlib.pyplot.imshow` but behaving more
sensibly.
Parameters
----------
figure : TODO
TODO: write parameters section using decorators to inherit
the matplotlib docstring
Notes
-----
Parameters are identical to `matplotlib.pyplot.imshow`
but this behaves somewhat differently:
* By default, it creates a new figure (unless a
      `figure` keyword argument is supplied).
* It modifies the axes of that figure to use the
full frame, without ticks or tick labels.
* It turns on `nearest` interpolation by default
(i.e., it does not antialias pixel data). This
can be overridden with the `interpolation`
argument as in `imshow`.
All other arguments and keyword arguments are passed
    on to `imshow`.
"""
if 'figure' not in kwargs:
f = plt.figure()
else:
f = kwargs['figure']
new_ax = matplotlib.axes.Axes(f,
[0, 0, 1, 1],
xticks=[],
yticks=[],
frame_on=False)
f.delaxes(f.gca())
f.add_axes(new_ax)
if len(args) < 5 and 'interpolation' not in kwargs:
kwargs['interpolation'] = 'nearest'
plt.imshow(*args, **kwargs)
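# Hedged usage sketch (added for illustration; the array below is invented):
#
#   img = np.random.uniform(size=(32, 32))
#   imview(img)   # full-frame axes, no ticks, nearest-neighbour interpolation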
def imview_async(*args, **kwargs):
"""
A version of `imview` that forks a separate process and
immediately shows the image.
Parameters
----------
window_title : str
TODO: writeme with decorators to inherit the other imviews'
docstrings
Notes
-----
Supports the `window_title` keyword argument to cope with
the title always being 'Figure 1'.
Returns the `multiprocessing.Process` handle.
"""
if 'figure' in kwargs:
raise ValueError("passing a figure argument not supported")
def fork_image_viewer():
f = plt.figure()
kwargs['figure'] = f
imview(*args, **kwargs)
if 'window_title' in kwargs:
f.set_window_title(kwargs['window_title'])
plt.show()
p = Process(None, fork_image_viewer)
p.start()
return p
def show(image):
"""
.. todo::
WRITEME
Parameters
----------
image : PIL Image object or ndarray
If ndarray, integer formats are assumed to use 0-255
and float formats are assumed to use 0-1
"""
viewer_command = string.preprocess('${PYLEARN2_VIEWER_COMMAND}')
if viewer_command == 'inline':
return imview(image)
if hasattr(image, '__array__'):
# do some shape checking because PIL just raises a tuple indexing error
# that doesn't make it very clear what the problem is
if len(image.shape) < 2 or len(image.shape) > 3:
raise ValueError('image must have either 2 or 3 dimensions but its'
' shape is ' + str(image.shape))
# The below is a temporary workaround that prevents us from crashing
# 3rd party image viewers such as eog by writing out overly large
# images.
# In the long run we should determine if this is a bug in PIL when
# producing
# such images or a bug in eog and determine a proper fix.
# Since this is hopefully just a short term workaround the
# constants below are not included in the interface to the
# function, so that 3rd party code won't start passing them.
max_height = 4096
max_width = 4096
# Display separate warnings for each direction, since it's
# common to crop only one.
if image.shape[0] > max_height:
image = image[0:max_height, :, :]
warnings.warn("Cropping image to smaller height to avoid crashing "
"the viewer program.")
        if image.shape[1] > max_width:
image = image[:, 0:max_width, :]
warnings.warn("Cropping the image to a smaller width to avoid "
"crashing the viewer program.")
# This ends the workaround
if image.dtype == 'int8':
image = np.cast['uint8'](image)
elif str(image.dtype).startswith('float'):
# don't use *=, we don't want to modify the input array
image = image * 255.
image = np.cast['uint8'](image)
# PIL is too stupid to handle single-channel arrays
if len(image.shape) == 3 and image.shape[2] == 1:
image = image[:, :, 0]
try:
ensure_Image()
image = Image.fromarray(image)
except TypeError:
reraise_as(TypeError("PIL issued TypeError on ndarray of shape " +
str(image.shape) + " and dtype " +
str(image.dtype)))
# Create a temporary file with the suffix '.png'.
fd, name = mkstemp(suffix='.png')
os.close(fd)
# Note:
# Although we can use tempfile.NamedTemporaryFile() to create
# a temporary file, the function should be used with care.
#
# In Python earlier than 2.7, a temporary file created by the
# function will be deleted just after the file is closed.
# We can re-use the name of the temporary file, but there is an
# instant where a file with the name does not exist in the file
# system before we re-use the name. This may cause a race
# condition.
#
# In Python 2.7 or later, tempfile.NamedTemporaryFile() has
# the 'delete' argument which can control whether a temporary
# file will be automatically deleted or not. With the argument,
# the above race condition can be avoided.
#
image.save(name)
if os.name == 'nt':
subprocess.Popen(viewer_command + ' ' + name + ' && del ' + name,
shell=True)
else:
subprocess.Popen(viewer_command + ' ' + name + ' ; rm ' + name,
shell=True)
def pil_from_ndarray(ndarray):
"""
Converts an ndarray to a PIL image.
Parameters
----------
ndarray : ndarray
An ndarray containing an image.
Returns
-------
pil : PIL Image
A PIL Image containing the image.
"""
try:
if ndarray.dtype == 'float32' or ndarray.dtype == 'float64':
assert ndarray.min() >= 0.0
assert ndarray.max() <= 1.0
ndarray = np.cast['uint8'](ndarray * 255)
if len(ndarray.shape) == 3 and ndarray.shape[2] == 1:
ndarray = ndarray[:, :, 0]
ensure_Image()
rval = Image.fromarray(ndarray)
return rval
except Exception as e:
logger.exception('original exception: ')
logger.exception(e)
logger.exception('ndarray.dtype: {0}'.format(ndarray.dtype))
logger.exception('ndarray.shape: {0}'.format(ndarray.shape))
raise
assert False
def ndarray_from_pil(pil, dtype='uint8'):
"""
Converts a PIL Image to an ndarray.
Parameters
----------
pil : PIL Image
An image represented as a PIL Image object
dtype : str
The dtype of ndarray to create
Returns
-------
ndarray : ndarray
The image as an ndarray.
"""
rval = np.asarray(pil)
if dtype != rval.dtype:
rval = np.cast[dtype](rval)
if str(dtype).startswith('float'):
rval /= 255.
if len(rval.shape) == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
return rval
def rescale(image, shape):
"""
Scales image to be no larger than shape. PIL might give you
unexpected results beyond that.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
i = pil_from_ndarray(image)
ensure_Image()
i.thumbnail([shape[1], shape[0]], Image.ANTIALIAS)
rval = ndarray_from_pil(i, dtype=image.dtype)
return rval
resize = rescale
def fit_inside(image, shape):
"""
Scales image down to fit inside shape preserves proportions of image
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
if image.shape[0] <= shape[0] and image.shape[1] <= shape[1]:
return image.copy()
row_ratio = float(image.shape[0]) / float(shape[0])
col_ratio = float(image.shape[1]) / float(shape[1])
if row_ratio > col_ratio:
target_shape = [shape[0], min(image.shape[1] / row_ratio, shape[1])]
else:
target_shape = [min(image.shape[0] / col_ratio, shape[0]), shape[1]]
assert target_shape[0] <= shape[0]
assert target_shape[1] <= shape[1]
assert target_shape[0] == shape[0] or target_shape[1] == shape[1]
rval = rescale(image, target_shape)
return rval
def letterbox(image, shape):
"""
Pads image with black letterboxing to bring image.shape up to shape
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3 # rows, cols, channels
assert len(shape) == 2 # rows, cols
assert image.shape[0] <= shape[0]
assert image.shape[1] <= shape[1]
if image.shape[0] == shape[0] and image.shape[1] == shape[1]:
return image.copy()
rval = np.zeros((shape[0], shape[1], image.shape[2]), dtype=image.dtype)
    rstart = (shape[0] - image.shape[0]) // 2
    cstart = (shape[1] - image.shape[1]) // 2
rend = rstart + image.shape[0]
cend = cstart + image.shape[1]
rval[rstart:rend, cstart:cend] = image
return rval
def make_letterboxed_thumbnail(image, shape):
"""
Scales image down to shape. Preserves proportions of image, introduces
black letterboxing if necessary.
Parameters
----------
image : WRITEME
shape : WRITEME
Returns
-------
WRITEME
"""
assert len(image.shape) == 3
assert len(shape) == 2
shrunk = fit_inside(image, shape)
letterboxed = letterbox(shrunk, shape)
return letterboxed
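# Hedged example (illustration only, assuming PIL/Pillow is importable): build
# a fixed-size letterboxed thumbnail from a random RGB array.
def _demo_letterboxed_thumbnail():
    rng = np.random.RandomState(0)
    img = rng.uniform(size=(128, 64, 3))        # rows, cols, channels in [0, 1]
    thumb = make_letterboxed_thumbnail(img, (64, 64))
    assert thumb.shape == (64, 64, 3)           # scaled to 64x32, padded to 64x64
    return thumb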
def load(filepath, rescale_image=True, dtype='float64'):
"""
Load an image from a file.
Parameters
----------
filepath : str
Path to the image file to load
rescale_image : bool
Default value: True
If True, returned images have pixel values in [0, 1]. Otherwise,
values are in [0, 255].
dtype: str
The dtype to use for the returned value
Returns
-------
img : numpy ndarray
An array containing the image that was in the file.
"""
assert isinstance(filepath, string_types)
if not rescale_image and dtype == 'uint8':
ensure_Image()
rval = np.asarray(Image.open(filepath))
assert rval.dtype == 'uint8'
return rval
s = 1.0
if rescale_image:
s = 255.
try:
ensure_Image()
rval = Image.open(filepath)
except Exception:
reraise_as(Exception("Could not open " + filepath))
numpy_rval = np.array(rval)
msg = ("Tried to load an image, got an array with %d"
" dimensions. Expected 2 or 3."
"This may indicate a mildly corrupted image file. Try "
"converting it to a different image format with a different "
"editor like gimp or imagemagic. Sometimes these programs are "
"more robust to minor corruption than PIL and will emit a "
"correctly formatted image in the new format.")
if numpy_rval.ndim not in [2, 3]:
logger.error(dir(rval))
logger.error(rval)
logger.error(rval.size)
rval.show()
raise AssertionError(msg % numpy_rval.ndim)
rval = numpy_rval
rval = np.cast[dtype](rval) / s
if rval.ndim == 2:
rval = rval.reshape(rval.shape[0], rval.shape[1], 1)
if rval.ndim != 3:
raise AssertionError("Something went wrong opening " +
filepath + '. Resulting shape is ' +
str(rval.shape) +
" (it's meant to have 3 dimensions by now)")
return rval
def save(filepath, ndarray):
"""
Saves an image to a file.
Parameters
----------
filepath : str
The path to write the file to.
ndarray : ndarray
An array containing the image to be saved.
"""
pil_from_ndarray(ndarray).save(filepath)
def scale_to_unit_interval(ndar, eps=1e-8):
"""
Scales all values in the ndarray ndar to be between 0 and 1
Parameters
----------
ndar : WRITEME
eps : WRITEME
Returns
-------
WRITEME
"""
ndar = ndar.copy()
ndar -= ndar.min()
ndar *= 1.0 / (ndar.max() + eps)
return ndar
def tile_raster_images(X, img_shape, tile_shape, tile_spacing=(0, 0),
scale_rows_to_unit_interval=True,
output_pixel_vals=True):
"""
Transform an array with one flattened image per row, into an array in
which images are reshaped and layed out like tiles on a floor.
This function is useful for visualizing datasets whose rows are images,
and also columns of matrices for transforming those rows
(such as the first layer of a neural net).
Parameters
----------
x : numpy.ndarray
2-d ndarray or 4 tuple of 2-d ndarrays or None for channels,
in which every row is a flattened image.
shape : 2-tuple of ints
The first component is the height of each image,
the second component is the width.
tile_shape : 2-tuple of ints
The number of images to tile in (row, columns) form.
scale_rows_to_unit_interval : bool
Whether or not the values need to be before being plotted to [0, 1].
output_pixel_vals : bool
Whether or not the output should be pixel values (int8) or floats.
Returns
-------
y : 2d-ndarray
The return value has the same dtype as X, and is suitable for
viewing as an image with PIL.Image.fromarray.
"""
assert len(img_shape) == 2
assert len(tile_shape) == 2
assert len(tile_spacing) == 2
# The expression below can be re-written in a more C style as
# follows :
#
# out_shape = [0,0]
# out_shape[0] = (img_shape[0]+tile_spacing[0])*tile_shape[0] -
# tile_spacing[0]
# out_shape[1] = (img_shape[1]+tile_spacing[1])*tile_shape[1] -
# tile_spacing[1]
out_shape = [(ishp + tsp) * tshp - tsp for ishp, tshp, tsp
in zip(img_shape, tile_shape, tile_spacing)]
if isinstance(X, tuple):
assert len(X) == 4
# Create an output np ndarray to store the image
if output_pixel_vals:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype='uint8')
else:
out_array = np.zeros((out_shape[0], out_shape[1], 4),
dtype=X.dtype)
# colors default to 0, alpha defaults to 1 (opaque)
if output_pixel_vals:
channel_defaults = [0, 0, 0, 255]
else:
channel_defaults = [0., 0., 0., 1.]
for i in xrange(4):
if X[i] is None:
# if channel is None, fill it with zeros of the correct
# dtype
dt = out_array.dtype
if output_pixel_vals:
dt = 'uint8'
out_array[:, :, i] = np.zeros(out_shape, dtype=dt) + \
channel_defaults[i]
else:
# use a recurrent call to compute the channel and store it
# in the output
out_array[:, :, i] = tile_raster_images(
X[i], img_shape, tile_shape, tile_spacing,
scale_rows_to_unit_interval, output_pixel_vals)
return out_array
else:
# if we are dealing with only one channel
H, W = img_shape
Hs, Ws = tile_spacing
# generate a matrix to store the output
dt = X.dtype
if output_pixel_vals:
dt = 'uint8'
out_array = np.zeros(out_shape, dtype=dt)
for tile_row in xrange(tile_shape[0]):
for tile_col in xrange(tile_shape[1]):
if tile_row * tile_shape[1] + tile_col < X.shape[0]:
this_x = X[tile_row * tile_shape[1] + tile_col]
if scale_rows_to_unit_interval:
# if we should scale values to be between 0 and 1
# do this by calling the `scale_to_unit_interval`
# function
this_img = scale_to_unit_interval(
this_x.reshape(img_shape))
else:
this_img = this_x.reshape(img_shape)
# add the slice to the corresponding position in the
# output array
c = 1
if output_pixel_vals:
c = 255
out_array[
tile_row * (H + Hs): tile_row * (H + Hs) + H,
tile_col * (W + Ws): tile_col * (W + Ws) + W
] = this_img * c
return out_array
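# Hedged example (illustration only): tile 100 random 8x8 "filters" into a
# 10x10 mosaic with one pixel of spacing; the result is a uint8 array that
# can be passed to show() or save().
def _demo_tile_raster_images():
    rng = np.random.RandomState(0)
    filters = rng.uniform(size=(100, 64))       # one flattened 8x8 image per row
    mosaic = tile_raster_images(filters, img_shape=(8, 8),
                                tile_shape=(10, 10), tile_spacing=(1, 1))
    assert mosaic.shape == (89, 89)             # 10*8 + 9*1 pixels per side
    return mosaic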
if __name__ == '__main__':
black = np.zeros((50, 50, 3), dtype='uint8')
red = black.copy()
red[:, :, 0] = 255
green = black.copy()
green[:, :, 1] = 255
show(black)
show(green)
show(red)
| bsd-3-clause |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/axes_grid/demo_edge_colorbar.py | 11 | 2597 | import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import AxesGrid
def get_demo_image():
import numpy as np
from matplotlib.cbook import get_sample_data
f = get_sample_data("axes_grid/bivariate_normal.npy", asfileobj=False)
z = np.load(f)
# z is a numpy array of 15x15
return z, (-3,4,-4,3)
def demo_bottom_cbar(fig):
"""
A grid of 2x2 images with a colorbar for each column.
"""
grid = AxesGrid(fig, 121, # similar to subplot(132)
nrows_ncols = (2, 2),
axes_pad = 0.10,
share_all=True,
label_mode = "1",
cbar_location = "bottom",
cbar_mode="edge",
cbar_pad = 0.25,
cbar_size = "15%",
direction="column"
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("autumn"), plt.get_cmap("summer")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
cbar = grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label("Bar")
# This affects all axes as share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
def demo_right_cbar(fig):
"""
A grid of 2x2 images. Each row has its own colorbar.
"""
    grid = AxesGrid(fig, 122,  # similar to subplot(122)
nrows_ncols = (2, 2),
axes_pad = 0.10,
label_mode = "1",
share_all = True,
cbar_location="right",
cbar_mode="edge",
cbar_size="7%",
cbar_pad="2%",
)
Z, extent = get_demo_image()
cmaps = [plt.get_cmap("spring"), plt.get_cmap("winter")]
for i in range(4):
im = grid[i].imshow(Z, extent=extent, interpolation="nearest",
cmap=cmaps[i//2])
if i % 2:
grid.cbar_axes[i//2].colorbar(im)
for cax in grid.cbar_axes:
cax.toggle_label(True)
cax.axis[cax.orientation].set_label('Foo')
# This affects all axes because we set share_all = True.
grid.axes_llc.set_xticks([-2, 0, 2])
grid.axes_llc.set_yticks([-2, 0, 2])
if 1:
F = plt.figure(1, (5.5, 2.5))
F.subplots_adjust(left=0.05, right=0.93)
demo_bottom_cbar(F)
demo_right_cbar(F)
plt.draw()
plt.show()
| mit |
suyashdb/hcp2bids | setup.py | 1 | 2182 | from setuptools import setup
import os, glob, shutil
import re, json, numpy
import nibabel as ni
here = os.path.abspath(os.path.dirname(__file__))
setup(
name="hcp2bids",
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# http://packaging.python.org/en/latest/tutorial.html#version
version='0.1.0',
description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
long_description="Command line tool to convert HCP dataset to a Brain Imaging Data Structure "
"compatible dataset.",
# The project URL.
url='https://github.com/suyashdb/hcp2bids',
# Choose your license
license='BSD',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
# Pick your license as you wish (should match "license" above)
'License :: OSI Approved :: BSD License',
# Specify the Python versions you support here. In particular, ensure
# that you indicate whether you support Python 2, Python 3 or both.
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.5',
],
# What does your project relate to?
keywords='BIDS HCP NIH',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages.
packages=["hcp2bids"],
# List run-time dependencies here. These will be installed by pip when your
# project is installed.
install_requires = ["numpy",
"pandas",
'nibabel'],
include_package_data=True,
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'hcp2bids=hcp2bids.main:main',
],
},
)
| mit |
andyraib/data-storage | python_scripts/env/lib/python3.6/site-packages/pandas/tseries/plotting.py | 7 | 9969 | """
Period formatters and locators adapted from scikits.timeseries by
Pierre GF Gerard-Marchant & Matt Knox
"""
# TODO: Use the fact that axis can have units to simplify the process
import numpy as np
from matplotlib import pylab
from pandas.tseries.period import Period
from pandas.tseries.offsets import DateOffset
import pandas.tseries.frequencies as frequencies
from pandas.tseries.index import DatetimeIndex
from pandas.formats.printing import pprint_thing
import pandas.compat as compat
from pandas.tseries.converter import (TimeSeries_DateLocator,
TimeSeries_DateFormatter)
# ---------------------------------------------------------------------
# Plotting functions and monkey patches
def tsplot(series, plotf, ax=None, **kwargs):
"""
Plots a Series on the given Matplotlib axes or the current axes
Parameters
----------
    series : Series
        The data to be plotted
    plotf : callable
        Function called as ``plotf(ax, x, y, **kwargs)`` to draw the values
    ax : Axes, optional
        matplotlib axes to draw on; defaults to the current axes
Notes
_____
Supports same kwargs as Axes.plot
"""
    # Use inferred freq if possible, need a test case for inferred
if ax is None:
import matplotlib.pyplot as plt
ax = plt.gca()
freq, series = _maybe_resample(series, ax, kwargs)
# Set ax with freq info
_decorate_axes(ax, freq, kwargs)
ax._plot_data.append((series, plotf, kwargs))
lines = plotf(ax, series.index._mpl_repr(), series.values, **kwargs)
# set date formatter, locators and rescale limits
format_dateaxis(ax, ax.freq)
return lines
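# Hedged usage sketch (not part of the pandas source): tsplot() draws a Series
# on period-aware axes; ``plotf`` is invoked as ``plotf(ax, x, y, **kwargs)``.
#
#   import pandas as pd
#   ts = pd.Series(np.arange(12.),
#                  index=pd.period_range('2000-01', periods=12, freq='M'))
#   tsplot(ts, lambda ax, x, y, **kwds: ax.plot(x, y, **kwds))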
def _maybe_resample(series, ax, kwargs):
# resample against axes freq if necessary
freq, ax_freq = _get_freq(ax, series)
if freq is None: # pragma: no cover
raise ValueError('Cannot use dynamic axis without frequency info')
# Convert DatetimeIndex to PeriodIndex
if isinstance(series.index, DatetimeIndex):
series = series.to_period(freq=freq)
if ax_freq is not None and freq != ax_freq:
if frequencies.is_superperiod(freq, ax_freq): # upsample input
series = series.copy()
series.index = series.index.asfreq(ax_freq, how='s')
freq = ax_freq
elif _is_sup(freq, ax_freq): # one is weekly
how = kwargs.pop('how', 'last')
series = getattr(series.resample('D'), how)().dropna()
series = getattr(series.resample(ax_freq), how)().dropna()
freq = ax_freq
elif frequencies.is_subperiod(freq, ax_freq) or _is_sub(freq, ax_freq):
_upsample_others(ax, freq, kwargs)
ax_freq = freq
else: # pragma: no cover
raise ValueError('Incompatible frequency conversion')
return freq, series
def _is_sub(f1, f2):
return ((f1.startswith('W') and frequencies.is_subperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_subperiod(f1, 'D')))
def _is_sup(f1, f2):
return ((f1.startswith('W') and frequencies.is_superperiod('D', f2)) or
(f2.startswith('W') and frequencies.is_superperiod(f1, 'D')))
def _upsample_others(ax, freq, kwargs):
legend = ax.get_legend()
lines, labels = _replot_ax(ax, freq, kwargs)
_replot_ax(ax, freq, kwargs)
other_ax = None
if hasattr(ax, 'left_ax'):
other_ax = ax.left_ax
if hasattr(ax, 'right_ax'):
other_ax = ax.right_ax
if other_ax is not None:
rlines, rlabels = _replot_ax(other_ax, freq, kwargs)
lines.extend(rlines)
labels.extend(rlabels)
if (legend is not None and kwargs.get('legend', True) and
len(lines) > 0):
title = legend.get_title().get_text()
if title == 'None':
title = None
ax.legend(lines, labels, loc='best', title=title)
def _replot_ax(ax, freq, kwargs):
data = getattr(ax, '_plot_data', None)
# clear current axes and data
ax._plot_data = []
ax.clear()
_decorate_axes(ax, freq, kwargs)
lines = []
labels = []
if data is not None:
for series, plotf, kwds in data:
series = series.copy()
idx = series.index.asfreq(freq, how='S')
series.index = idx
ax._plot_data.append((series, plotf, kwds))
# for tsplot
if isinstance(plotf, compat.string_types):
from pandas.tools.plotting import _plot_klass
plotf = _plot_klass[plotf]._plot
lines.append(plotf(ax, series.index._mpl_repr(),
series.values, **kwds)[0])
labels.append(pprint_thing(series.name))
return lines, labels
def _decorate_axes(ax, freq, kwargs):
"""Initialize axes for time-series plotting"""
if not hasattr(ax, '_plot_data'):
ax._plot_data = []
ax.freq = freq
xaxis = ax.get_xaxis()
xaxis.freq = freq
if not hasattr(ax, 'legendlabels'):
ax.legendlabels = [kwargs.get('label', None)]
else:
ax.legendlabels.append(kwargs.get('label', None))
ax.view_interval = None
ax.date_axis_info = None
def _get_ax_freq(ax):
"""
Get the freq attribute of the ax object if set.
Also checks shared axes (eg when using secondary yaxis, sharex=True
or twinx)
"""
ax_freq = getattr(ax, 'freq', None)
if ax_freq is None:
# check for left/right ax in case of secondary yaxis
if hasattr(ax, 'left_ax'):
ax_freq = getattr(ax.left_ax, 'freq', None)
elif hasattr(ax, 'right_ax'):
ax_freq = getattr(ax.right_ax, 'freq', None)
if ax_freq is None:
# check if a shared ax (sharex/twinx) has already freq set
shared_axes = ax.get_shared_x_axes().get_siblings(ax)
if len(shared_axes) > 1:
for shared_ax in shared_axes:
ax_freq = getattr(shared_ax, 'freq', None)
if ax_freq is not None:
break
return ax_freq
def _get_freq(ax, series):
# get frequency from data
freq = getattr(series.index, 'freq', None)
if freq is None:
freq = getattr(series.index, 'inferred_freq', None)
ax_freq = _get_ax_freq(ax)
# use axes freq if no data freq
if freq is None:
freq = ax_freq
# get the period frequency
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
return freq, ax_freq
def _use_dynamic_x(ax, data):
freq = _get_index_freq(data)
ax_freq = _get_ax_freq(ax)
if freq is None: # convert irregular if axes has freq info
freq = ax_freq
else: # do not use tsplot if irregular was plotted first
if (ax_freq is None) and (len(ax.get_lines()) > 0):
return False
if freq is None:
return False
if isinstance(freq, DateOffset):
freq = freq.rule_code
else:
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
if freq is None:
return False
# hack this for 0.10.1, creating more technical debt...sigh
if isinstance(data.index, DatetimeIndex):
base = frequencies.get_freq(freq)
x = data.index
if (base <= frequencies.FreqGroup.FR_DAY):
return x[:1].is_normalized
return Period(x[0], freq).to_timestamp(tz=x.tz) == x[0]
return True
def _get_index_freq(data):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if freq == 'B':
weekdays = np.unique(data.index.dayofweek)
if (5 in weekdays) or (6 in weekdays):
freq = None
return freq
def _maybe_convert_index(ax, data):
# tsplot converts automatically, but don't want to convert index
# over and over for DataFrames
if isinstance(data.index, DatetimeIndex):
freq = getattr(data.index, 'freq', None)
if freq is None:
freq = getattr(data.index, 'inferred_freq', None)
if isinstance(freq, DateOffset):
freq = freq.rule_code
if freq is None:
freq = _get_ax_freq(ax)
if freq is None:
raise ValueError('Could not get frequency alias for plotting')
freq = frequencies.get_base_alias(freq)
freq = frequencies.get_period_alias(freq)
data = data.to_period(freq=freq)
return data
# Patch methods for subplot. Only format_dateaxis is currently used.
# Do we need the rest for convenience?
def format_dateaxis(subplot, freq):
"""
Pretty-formats the date axis (x-axis).
Major and minor ticks are automatically set for the frequency of the
current underlying series. As the dynamic mode is activated by
default, changing the limits of the x axis will intelligently change
the positions of the ticks.
"""
majlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minlocator = TimeSeries_DateLocator(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_locator(majlocator)
subplot.xaxis.set_minor_locator(minlocator)
majformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=False,
plot_obj=subplot)
minformatter = TimeSeries_DateFormatter(freq, dynamic_mode=True,
minor_locator=True,
plot_obj=subplot)
subplot.xaxis.set_major_formatter(majformatter)
subplot.xaxis.set_minor_formatter(minformatter)
# x and y coord info
subplot.format_coord = lambda t, y: (
"t = {0} y = {1:8f}".format(Period(ordinal=int(t), freq=freq), y))
pylab.draw_if_interactive()
| apache-2.0 |
aringh/odl | odl/contrib/solvers/spdhg/misc.py | 1 | 22813 | # Copyright 2014-2018 The ODL contributors
#
# This file is part of ODL.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.
"""Functions for folders and files."""
from __future__ import print_function
from builtins import super
import numpy as np
import odl
import scipy.signal
import matplotlib
import matplotlib.pyplot as plt
from skimage.io import imsave
__all__ = ('total_variation', 'TotalVariationNonNegative', 'bregman',
'save_image', 'save_signal', 'divide_1Darray_equally', 'Blur2D',
'KullbackLeiblerSmooth')
def save_image(image, name, folder, fignum, cmap='gray', clim=None):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
image.show(name, cmap=cmap, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
if clim is None:
x = image - np.min(image)
if np.max(x) > 1e-4:
x /= np.max(x)
else:
x = (image - clim[0]) / (clim[1] - clim[0])
x = np.minimum(np.maximum(x, 0), 1)
imsave('{}/{}.png'.format(folder, name), np.rot90(x, 1))
def save_signal(signal, name, folder, fignum):
matplotlib.rc('text', usetex=False)
fig = plt.figure(fignum)
plt.clf()
signal.show(name, fig=fig)
fig.savefig('{}/{}_fig.png'.format(folder, name), bbox_inches='tight')
def bregman(f, v, subgrad):
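    # Bregman distance D_f(x, v) = f(x) - f(v) - <subgrad, x - v>, assembled
    # as a functional of x by linearly perturbing f with -subgrad.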
return (odl.solvers.FunctionalQuadraticPerturb(f, linear_term=-subgrad) -
f(v) + subgrad.inner(v))
def partition_1d(arr, slices):
return tuple(arr[slc] for slc in slices)
def partition_equally_1d(arr, nparts, order='interlaced'):
if order == 'block':
stride = int(np.ceil(arr.size / nparts))
slc_list = [slice(i * stride, (i + 1) * stride) for i in range(nparts)]
elif order == 'interlaced':
slc_list = [slice(i, len(arr), nparts) for i in range(nparts)]
else:
raise ValueError
return partition_1d(arr, slc_list)
def divide_1Darray_equally(ind, nsub):
"""Divide an array into equal chunks to be used for instance in OSEM.
Parameters
----------
ind : ndarray
input array
    nsub : int
number of subsets to be divided into
Returns
-------
sub2ind : list
list of indices for each subset
ind2sub : list
list of subsets for each index
"""
n_ind = len(ind)
sub2ind = partition_equally_1d(ind, nsub, order='interlaced')
ind2sub = []
for i in range(n_ind):
ind2sub.append([])
for i in range(nsub):
for j in sub2ind[i]:
ind2sub[j].append(i)
return (sub2ind, ind2sub)
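# Hedged example (illustration only): splitting six indices into two
# interlaced subsets, e.g. for ordered-subset methods such as OSEM.
#
#   sub2ind, ind2sub = divide_1Darray_equally(np.arange(6), 2)
#   # sub2ind -> (array([0, 2, 4]), array([1, 3, 5]))
#   # ind2sub -> [[0], [1], [0], [1], [0], [1]]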
def total_variation(domain, grad=None):
"""Total variation functional.
Parameters
----------
domain : odlspace
domain of TV functional
grad : gradient operator, optional
Gradient operator of the total variation functional. This may be any
linear operator and thereby generalizing TV. default=forward
differences with Neumann boundary conditions
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tv = spdhg.total_variation(space)
>>> x = space.one()
    >>> tv(x) < 1e-10
    True
"""
if grad is None:
grad = odl.Gradient(domain, method='forward', pad_mode='symmetric')
grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides**2))
else:
grad = grad
f = odl.solvers.GroupL1Norm(grad.range, exponent=2)
return f * grad
class TotalVariationNonNegative(odl.solvers.Functional):
"""Total variation function with nonnegativity constraint and strongly
convex relaxation.
In formulas, this functional may represent
alpha * |grad x|_1 + char_fun(x) + beta/2 |x|^2_2
with regularization parameter alpha and strong convexity beta. In addition,
the nonnegativity constraint is achieved with the characteristic function
    char_fun(x) = 0 if x >= 0 and +infinity otherwise.
Parameters
----------
domain : odlspace
domain of TV functional
alpha : scalar, optional
Regularization parameter, positive
prox_options : dict, optional
name: string, optional
name of the method to perform the prox operator, default=FGP
warmstart: boolean, optional
Do you want a warm start, i.e. start with the dual variable
from the last call? default=True
niter: int, optional
number of iterations per call, default=5
p: array, optional
initial dual variable, default=zeros
grad : gradient operator, optional
Gradient operator to be used within the total variation functional.
default=see TV
"""
def __init__(self, domain, alpha=1, prox_options={}, grad=None,
strong_convexity=0):
"""
"""
self.strong_convexity = strong_convexity
if 'name' not in prox_options:
prox_options['name'] = 'FGP'
if 'warmstart' not in prox_options:
prox_options['warmstart'] = True
if 'niter' not in prox_options:
prox_options['niter'] = 5
if 'p' not in prox_options:
prox_options['p'] = None
if 'tol' not in prox_options:
prox_options['tol'] = None
self.prox_options = prox_options
self.alpha = alpha
self.tv = total_variation(domain, grad=grad)
self.grad = self.tv.right
self.nn = odl.solvers.IndicatorBox(domain, 0, np.inf)
self.l2 = 0.5 * odl.solvers.L2NormSquared(domain)
self.proj_P = self.tv.left.convex_conj.proximal(0)
self.proj_C = self.nn.proximal(1)
super().__init__(space=domain, linear=False, grad_lipschitz=0)
def __call__(self, x):
"""Evaluate functional.
Examples
--------
Check that the total variation of a constant is zero
>>> import odl.contrib.spdhg as spdhg, odl
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = space.one()
        >>> tvnn(x) < 1e-10
        True
Check that negative functions are mapped to infty
>>> import odl.contrib.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
        >>> np.isinf(tvnn(x))
        True
"""
nn = self.nn(x)
if nn is np.inf:
return nn
else:
out = self.alpha * self.tv(x) + nn
if self.strong_convexity > 0:
out += self.strong_convexity * self.l2(x)
return out
def proximal(self, sigma):
"""Prox operator of TV. It allows the proximal step length to be a
vector of positive elements.
Examples
--------
Check that the proximal operator is the identity for sigma=0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0)(x)
        >>> (y-x).norm() < 1e-10
        True
Check that negative functions are mapped to 0
>>> import odl.contrib.solvers.spdhg as spdhg, odl, numpy as np
>>> space = odl.uniform_discr([0, 0], [3, 3], [3, 3])
>>> tvnn = spdhg.TotalVariationNonNegative(space, alpha=2)
>>> x = -space.one()
>>> y = tvnn.proximal(0.1)(x)
        >>> y.norm() < 1e-10
        True
"""
if sigma == 0:
return odl.IdentityOperator(self.domain)
else:
def tv_prox(z, out=None):
if out is None:
out = z.space.zero()
opts = self.prox_options
sigma_ = np.copy(sigma)
z_ = z.copy()
if self.strong_convexity > 0:
sigma_ /= (1 + sigma * self.strong_convexity)
z_ /= (1 + sigma * self.strong_convexity)
if opts['name'] == 'FGP':
if opts['warmstart']:
if opts['p'] is None:
opts['p'] = self.grad.range.zero()
p = opts['p']
else:
p = self.grad.range.zero()
sigma_sqrt = np.sqrt(sigma_)
z_ /= sigma_sqrt
grad = sigma_sqrt * self.grad
grad.norm = sigma_sqrt * self.grad.norm
niter = opts['niter']
alpha = self.alpha
out[:] = fgp_dual(p, z_, alpha, niter, grad, self.proj_C,
self.proj_P, tol=opts['tol'])
out *= sigma_sqrt
return out
else:
raise NotImplementedError('Not yet implemented')
return tv_prox
def fgp_dual(p, data, alpha, niter, grad, proj_C, proj_P, tol=None, **kwargs):
"""Computes a solution to the ROF problem with the fast gradient
projection algorithm.
Parameters
----------
p : np.array
dual initial variable
data : np.array
noisy data / proximal point
alpha : float
regularization parameter
niter : int
number of iterations
grad : instance of gradient class
class that supports grad(x), grad.adjoint(x), grad.norm
proj_C : function
projection onto the constraint set of the primal variable,
e.g. non-negativity
proj_P : function
projection onto the constraint set of the dual variable,
e.g. norm <= 1
tol : float (optional)
nonnegative parameter that gives the tolerance for convergence. If set
None, then the algorithm will run for a fixed number of iterations
Other Parameters
----------------
callback : callable, optional
Function called with the current iterate after each iteration.
"""
# Callback object
callback = kwargs.pop('callback', None)
if callback is not None and not callable(callback):
raise TypeError('`callback` {} is not callable'.format(callback))
factr = 1 / (grad.norm**2 * alpha)
q = p.copy()
x = data.space.zero()
t = 1.
if tol is None:
def convergence_eval(p1, p2):
return False
else:
def convergence_eval(p1, p2):
return (p1 - p2).norm() / p1.norm() < tol
pnew = p.copy()
if callback is not None:
callback(p)
for k in range(niter):
t0 = t
grad.adjoint(q, out=x)
proj_C(data - alpha * x, out=x)
grad(x, out=pnew)
pnew *= factr
pnew += q
proj_P(pnew, out=pnew)
converged = convergence_eval(p, pnew)
if not converged:
# update step size
t = (1 + np.sqrt(1 + 4 * t0 ** 2)) / 2.
# calculate next iterate
q[:] = pnew + (t0 - 1) / t * (pnew - p)
p[:] = pnew
if converged:
t = None
break
if callback is not None:
callback(p)
# get current image estimate
x = proj_C(data - alpha * grad.adjoint(p))
return x
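# ----------------------------------------------------------------------------
# Illustrative sketch (not original code): calling fgp_dual directly on a
# small ROF-type problem. The space, data, alpha and iteration count are
# arbitrary; the norm bound 2*sqrt(sum(1/h_i^2)) is the usual forward
# difference estimate and is attached as the `norm` attribute fgp_dual needs.
# The isotropic GroupL1Norm mirrors what `total_variation` is assumed to use.
def _example_fgp_dual():
    space = odl.uniform_discr([0, 0], [1, 1], [32, 32])
    grad = odl.Gradient(space, method='forward', pad_mode='symmetric')
    grad.norm = 2 * np.sqrt(sum(1 / grad.domain.cell_sides ** 2))
    data = space.element(np.random.rand(32, 32))
    proj_C = odl.solvers.IndicatorBox(space, 0, np.inf).proximal(1)  # x >= 0
    proj_P = odl.solvers.GroupL1Norm(grad.range,
                                     exponent=2).convex_conj.proximal(0)
    p = grad.range.zero()  # cold-start dual variable
    return fgp_dual(p, data, 0.1, 20, grad, proj_C, proj_P)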
class Blur2D(odl.Operator):
"""Blur operator"""
def __init__(self, domain, kernel, boundary_condition='wrap'):
"""Initialize a new instance.
"""
super().__init__(domain=domain, range=domain, linear=True)
self.__kernel = kernel
self.__boundary_condition = boundary_condition
@property
def kernel(self):
return self.__kernel
@property
def boundary_condition(self):
return self.__boundary_condition
def _call(self, x, out):
        out[:] = scipy.signal.convolve2d(x, self.kernel, mode='same',
                                         boundary=self.boundary_condition)
@property
def gradient(self):
        raise NotImplementedError('Not yet implemented')
@property
def adjoint(self):
adjoint_kernel = self.kernel.copy().conj()
adjoint_kernel = np.fliplr(np.flipud(adjoint_kernel))
return Blur2D(self.domain, adjoint_kernel, self.boundary_condition)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.kernel,
self.boundary_condition)
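# ----------------------------------------------------------------------------
# Illustrative sketch (not original code): a quick numerical adjoint check
# for Blur2D, comparing <A x, y> with <x, A^T y>. The 5x5 box kernel and the
# grid size are arbitrary choices for demonstration.
def _example_blur_adjoint_check():
    space = odl.uniform_discr([0, 0], [1, 1], [32, 32])
    blur = Blur2D(space, np.ones((5, 5)) / 25)
    x = space.element(np.random.rand(32, 32))
    y = space.element(np.random.rand(32, 32))
    # the two inner products should agree up to floating point error
    return blur(x).inner(y), x.inner(blur.adjoint(y))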
class KullbackLeiblerSmooth(odl.solvers.Functional):
"""The smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
smooth Kullback-Leibler functional :math:`\\phi` is defined as
.. math::
\\phi(x) = \\sum_{i=1}^n \\begin{cases}
x + r - y + y * \\log(y / (x + r))
                    & \\text{if $x \\geq 0$} \\
                (y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - y +
                    y * \\log(y / r) & \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
i which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
self.strong_convexity = 0
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.max(data / background ** 2))
if data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the KL-diveregnce in the point ``x``.
If any components of ``x`` is non-positive, the value is positive
infinity.
"""
y = self.data
r = self.background
obj = self.domain.zero()
# x + r - y + y * log(y / (x + r)) = x - y * log(x + r) + c1
# with c1 = r - y + y * log y
i = x.ufuncs.greater_equal(0)
obj[i] = x[i] + r[i] - y[i]
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / (x[k] + r[k])).ufuncs.log()
        # (y / (2 * r^2)) * x^2 + (1 - y / r) * x + r - y + y * log(y / r)
        # = (y / (2 * r^2)) * x^2 + (1 - y / r) * x + c2
        # with c2 = r - y + y * log(y / r)
i = i.ufuncs.logical_not()
obj[i] += (y[i] / (2 * r[i]**2) * x[i]**2 + (1 - y[i] / r[i]) * x[i] +
r[i] - y[i])
k = i.ufuncs.logical_and(j)
obj[k] += y[k] * (y[k] / r[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
"""Return the `proximal factory` of the functional.
"""
        raise NotImplementedError('Not yet implemented')
@property
def convex_conj(self):
"""The convex conjugate functional of the KL-functional."""
return KullbackLeiblerSmoothConvexConj(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
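# ----------------------------------------------------------------------------
# Illustrative sketch (not original code): evaluating the smoothed KL data
# fit for a Poisson-type problem. The counts, the background level 0.1 and
# the grid size are arbitrary assumptions; the background must be strictly
# positive for the functional to be defined.
def _example_kl_smooth_eval():
    space = odl.uniform_discr([0, 0], [1, 1], [16, 16])
    data = space.element(np.random.poisson(5.0, (16, 16)).astype('float64'))
    background = 0.1 * space.one()
    kl = KullbackLeiblerSmooth(space, data, background)
    x = space.element(np.random.rand(16, 16))
    return kl(x)  # finite for non-negative x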
class KullbackLeiblerSmoothConvexConj(odl.solvers.Functional):
"""The convex conjugate of the smooth Kullback-Leibler divergence functional.
Notes
-----
If the functional is defined on an :math:`\mathbb{R}^n`-like space, the
convex conjugate of the smooth Kullback-Leibler functional :math:`\\phi^*`
is defined as
.. math::
\\phi^*(x) = \\sum_{i=1}^n \\begin{cases}
            r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
                3 / 2 * y - 2 * r - y * \\log(y / r)
                & \\text{if $x < 1 - y / r$} \\
            - r * x - y * \\log(1 - x)
                & \\text{if $1 - y / r \\leq x < 1$} \\
+ \infty
& \\text{else}
\\end{cases}
where all variables on the right hand side of the equation have a subscript
:math:`i` which is omitted for readability.
References
----------
[CERS2017] A. Chambolle, M. J. Ehrhardt, P. Richtarik and C.-B. Schoenlieb,
*Stochastic Primal-Dual Hybrid Gradient Algorithm with Arbitrary Sampling
and Imaging Applications*. ArXiv: http://arxiv.org/abs/1706.04957 (2017).
"""
def __init__(self, space, data, background):
"""Initialize a new instance.
Parameters
----------
space : `DiscreteLp` or `TensorSpace`
Domain of the functional.
data : ``space`` `element-like`
Data vector which has to be non-negative.
background : ``space`` `element-like`
Background vector which has to be non-negative.
"""
if background.ufuncs.less_equal(0).ufuncs.sum() > 0:
raise NotImplementedError('Background must be positive')
super().__init__(space=space, linear=False,
grad_lipschitz=np.inf)
if data is not None and data not in self.domain:
            raise ValueError('`data` {!r} not in `domain` {!r}'
                             ''.format(data, self.domain))
self.__data = data
self.__background = background
if np.min(self.data) == 0:
self.strong_convexity = np.inf
else:
self.strong_convexity = np.min(self.background**2 / self.data)
@property
def data(self):
"""The data in the Kullback-Leibler functional."""
return self.__data
@property
def background(self):
"""The background in the Kullback-Leibler functional."""
return self.__background
def _call(self, x):
"""Return the value in the point ``x``.
If any components of ``x`` is larger than or equal to 1, the value is
positive infinity.
"""
# TODO: cover properly the case data = 0
y = self.data
r = self.background
# if any element is greater or equal to one
if x.ufuncs.greater_equal(1).ufuncs.sum() > 0:
return np.inf
obj = self.domain.zero()
# out = sum(f)
# f =
# if x < 1 - y / r:
# r^2 / (2 * y) * x^2 + (r - r^2 / y) * x + r^2 / (2 * y) +
# 3 / 2 * y - 2 * r - y * log(y / r)
# if x >= 1 - y / r:
# - r * x - y * log(1 - x)
i = x.ufuncs.less(1 - y / r)
ry = r[i]**2 / y[i]
obj[i] += (ry / 2 * x[i]**2 + (r[i] - ry) * x[i] + ry / 2 +
3 / 2 * y[i] - 2 * r[i])
j = y.ufuncs.greater(0)
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (y[k] / r[k]).ufuncs.log()
i = i.ufuncs.logical_not()
obj[i] -= r[i] * x[i]
k = i.ufuncs.logical_and(j)
obj[k] -= y[k] * (1 - x[k]).ufuncs.log()
return obj.inner(self.domain.one())
@property
def gradient(self):
"""Gradient operator of the functional."""
        raise NotImplementedError('Not yet implemented')
@property
def proximal(self):
space = self.domain
y = self.data
r = self.background
class ProxKullbackLeiblerSmoothConvexConj(odl.Operator):
"""Proximal operator of the convex conjugate of the smooth
Kullback-Leibler functional.
"""
def __init__(self, sigma):
"""Initialize a new instance.
Parameters
----------
sigma : positive float
Step size parameter
"""
self.sigma = float(sigma)
self.background = r
self.data = y
super().__init__(domain=space, range=space, linear=False)
def _call(self, x, out):
s = self.sigma
y = self.data
r = self.background
sr = s * r
sy = s * y
# out =
# if x < 1 - y / r:
# (y * x - s * r * y + s * r**2) / (y + s * r**2)
# if x >= 1 - y / r:
# 0.5 * (x + s * r + 1 -
# sqrt((x + s * r - 1)**2 + 4 * s * y)
i = x.ufuncs.less(1 - y / r)
# TODO: This may be faster without indexing on the GPU?
out[i] = ((y[i] * x[i] - sr[i] * y[i] + sr[i] * r[i]) /
(y[i] + sr[i] * r[i]))
i.ufuncs.logical_not(out=i)
out[i] = (x[i] + sr[i] + 1 -
((x[i] + sr[i] - 1) ** 2 + 4 * sy[i]).ufuncs.sqrt())
out[i] /= 2
return out
return ProxKullbackLeiblerSmoothConvexConj
@property
def convex_conj(self):
"""The convex conjugate functional of the smooth KL-functional."""
return KullbackLeiblerSmooth(self.domain, self.data,
self.background)
def __repr__(self):
"""Return ``repr(self)``."""
return '{}({!r}, {!r}, {!r})'.format(
self.__class__.__name__, self.domain, self.data, self.background)
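# ----------------------------------------------------------------------------
# Illustrative sketch (not original code): one dual proximal step with the
# smoothed KL convex conjugate, as it would appear in a PDHG/SPDHG dual
# update. The step size 0.5 and the random dual iterate are arbitrary.
def _example_kl_smooth_convex_conj_prox():
    space = odl.uniform_discr([0, 0], [1, 1], [16, 16])
    data = space.element(np.random.poisson(5.0, (16, 16)).astype('float64'))
    background = 0.1 * space.one()
    f_conj = KullbackLeiblerSmoothConvexConj(space, data, background)
    y = space.element(np.random.rand(16, 16))  # current dual iterate
    return f_conj.proximal(0.5)(y)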
| mpl-2.0 |
valexandersaulys/airbnb_kaggle_contest | venv/lib/python3.4/site-packages/pandas/tseries/tests/test_base.py | 9 | 82416 | from __future__ import print_function
import re
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
from pandas.tseries.base import DatetimeIndexOpsMixin
from pandas.util.testing import assertRaisesRegexp, assertIsInstance
from pandas.tseries.common import is_datetimelike
from pandas import (Series, Index, Int64Index, Timestamp, DatetimeIndex, PeriodIndex,
TimedeltaIndex, Timedelta, timedelta_range, date_range, Float64Index)
import pandas.tseries.offsets as offsets
import pandas.tslib as tslib
import nose
import pandas.util.testing as tm
from pandas.tests.test_base import Ops
class TestDatetimeIndexOps(Ops):
tz = [None, 'UTC', 'Asia/Tokyo', 'US/Eastern',
'dateutil/Asia/Singapore', 'dateutil/US/Pacific']
def setUp(self):
super(TestDatetimeIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['date','time','microsecond','nanosecond', 'is_month_start', 'is_month_end', 'is_quarter_start',
'is_quarter_end', 'is_year_start', 'is_year_end'], lambda x: isinstance(x,DatetimeIndex))
def test_ops_properties_basic(self):
# sanity check that the behavior didn't change
# GH7206
for op in ['year','day','second','weekday']:
self.assertRaises(TypeError, lambda x: getattr(self.dt_series,op))
# attribute access should still work!
s = Series(dict(year=2000,month=1,day=10))
self.assertEqual(s.year,2000)
self.assertEqual(s.month,1)
self.assertEqual(s.day,10)
self.assertRaises(AttributeError, lambda : s.weekday)
def test_astype_str(self):
# test astype string - #10442
result = date_range('2012-01-01', periods=4, name='test_name').astype(str)
expected = Index(['2012-01-01', '2012-01-02', '2012-01-03','2012-01-04'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with tz and name
result = date_range('2012-01-01', periods=3, name='test_name', tz='US/Eastern').astype(str)
expected = Index(['2012-01-01 00:00:00-05:00', '2012-01-02 00:00:00-05:00',
'2012-01-03 00:00:00-05:00'], name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and name
result = date_range('1/1/2011', periods=3, freq='H', name='test_name').astype(str)
expected = Index(['2011-01-01 00:00:00', '2011-01-01 01:00:00', '2011-01-01 02:00:00'],
name='test_name', dtype=object)
tm.assert_index_equal(result, expected)
# test astype string with freqH and timezone
result = date_range('3/6/2012 00:00', periods=2, freq='H',
tz='Europe/London', name='test_name').astype(str)
expected = Index(['2012-03-06 00:00:00+00:00', '2012-03-06 01:00:00+00:00'],
dtype=object, name='test_name')
tm.assert_index_equal(result, expected)
def test_asobject_tolist(self):
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Timestamp('2013-01-31'), pd.Timestamp('2013-02-28'),
pd.Timestamp('2013-03-31'), pd.Timestamp('2013-04-30')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = pd.date_range(start='2013-01-01', periods=4, freq='M', name='idx', tz='Asia/Tokyo')
expected_list = [pd.Timestamp('2013-01-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-02-28', tz='Asia/Tokyo'),
pd.Timestamp('2013-03-31', tz='Asia/Tokyo'),
pd.Timestamp('2013-04-30', tz='Asia/Tokyo')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = DatetimeIndex([datetime(2013, 1, 1), datetime(2013, 1, 2),
pd.NaT, datetime(2013, 1, 4)], name='idx')
expected_list = [pd.Timestamp('2013-01-01'), pd.Timestamp('2013-01-02'),
pd.NaT, pd.Timestamp('2013-01-04')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
for tz in self.tz:
# monotonic
idx1 = pd.DatetimeIndex(['2011-01-01', '2011-01-02',
'2011-01-03'], tz=tz)
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.DatetimeIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], tz=tz)
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Timestamp('2011-01-01', tz=tz))
self.assertEqual(idx.max(), pd.Timestamp('2011-01-03', tz=tz))
for op in ['min', 'max']:
# Return NaT
obj = DatetimeIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = DatetimeIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx = []
idx.append(DatetimeIndex([], freq='D'))
idx.append(DatetimeIndex(['2011-01-01'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern'))
idx.append(DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='UTC'))
exp = []
exp.append("""DatetimeIndex([], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='datetime64[ns]', freq='D')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+09:00', '2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00'], dtype='datetime64[ns, Asia/Tokyo]', freq='H')""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00-05:00', '2011-01-01 10:00:00-05:00', 'NaT'], dtype='datetime64[ns, US/Eastern]', freq=None)""")
exp.append("""DatetimeIndex(['2011-01-01 09:00:00+00:00', '2011-01-01 10:00:00+00:00', 'NaT'], dtype='datetime64[ns, UTC]', freq=None)""")
with pd.option_context('display.width', 300):
for indx, expected in zip(idx, exp):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(indx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
idx7 = DatetimeIndex(['2011-01-01 09:00', '2011-01-02 10:15'])
exp1 = """Series([], dtype: datetime64[ns])"""
exp2 = """0 2011-01-01
dtype: datetime64[ns]"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: datetime64[ns]"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: datetime64[ns]"""
exp5 = """0 2011-01-01 09:00:00+09:00
1 2011-01-01 10:00:00+09:00
2 2011-01-01 11:00:00+09:00
dtype: datetime64[ns, Asia/Tokyo]"""
exp6 = """0 2011-01-01 09:00:00-05:00
1 2011-01-01 10:00:00-05:00
2 NaT
dtype: datetime64[ns, US/Eastern]"""
exp7 = """0 2011-01-01 09:00:00
1 2011-01-02 10:15:00
dtype: datetime64[ns]"""
with pd.option_context('display.width', 300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7]):
result = repr(Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = DatetimeIndex([], freq='D')
idx2 = DatetimeIndex(['2011-01-01'], freq='D')
idx3 = DatetimeIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo')
idx6 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', pd.NaT],
tz='US/Eastern')
exp1 = """DatetimeIndex: 0 entries
Freq: D"""
exp2 = """DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 to 2011-01-01 11:00:00+09:00
Freq: H"""
exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6],
[exp1, exp2, exp3, exp4, exp5, exp6]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
for tz in [None, 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range(start='2013-04-01', periods=30, freq=freq, tz=tz)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
for tz in self.tz:
# union
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=10, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=8, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00', '2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng + 1
expected = pd.date_range('2000-01-01 10:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
for tz in self.tz:
# diff
rng1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other1 = pd.date_range('1/6/2000', freq='D', periods=5, tz=tz)
expected1 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
rng2 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other2 = pd.date_range('1/4/2000', freq='D', periods=5, tz=tz)
expected2 = pd.date_range('1/1/2000', freq='D', periods=3, tz=tz)
rng3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
other3 = pd.DatetimeIndex([], tz=tz)
expected3 = pd.date_range('1/1/2000', freq='D', periods=5, tz=tz)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3)]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng - delta
expected = pd.date_range('1999-12-31 22:00', '2000-01-31 22:00', tz=tz)
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = pd.date_range('2000-01-01 09:00', freq='H', periods=10, tz=tz)
result = rng - 1
expected = pd.date_range('2000-01-01 08:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
for tz in [None, 'UTC', 'Asia/Tokyo', 'US/Eastern']:
idx = pd.date_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = DatetimeIndex(np.repeat(idx.values, range(1, len(idx) + 1)), tz=tz)
exp_idx = pd.date_range('2011-01-01 18:00', freq='-1H', periods=10, tz=tz)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.date_range('2011-01-01 09:00', freq='H', periods=10, tz=tz)
tm.assert_index_equal(idx.unique(), expected)
idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], tz=tz)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00'], tz=tz)
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = DatetimeIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], tz=tz)
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(DatetimeIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['2015', '2015', '2016'], ['2015', '2015', '2014'])):
tm.assertIn(idx[0], idx)
def test_order(self):
# with freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D', name='idx')
idx2 = DatetimeIndex(['2011-01-01 09:00', '2011-01-01 10:00', '2011-01-01 11:00'],
freq='H', tz='Asia/Tokyo', name='tzidx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
# without freq
idx1 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], name='idx1')
exp1 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], name='idx1')
idx2 = DatetimeIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
tz='Asia/Tokyo', name='idx2')
exp2 = DatetimeIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
tz='Asia/Tokyo', name='idx2')
idx3 = DatetimeIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], name='idx3')
exp3 = DatetimeIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx[0]
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx[0:5]
expected = pd.date_range('2011-01-01', '2011-01-05', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.date_range('2011-01-01', '2011-01-09', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.date_range('2011-01-12', '2011-01-24', freq='3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = DatetimeIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='-1D', tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.date_range('2011-01-01', '2011-01-31', freq='D', name='idx')
idx2 = pd.date_range('2011-01-01', '2011-01-31', freq='D', tz='Asia/Tokyo', name='idx')
for idx in [idx1, idx2]:
result = idx.take([0])
self.assertEqual(result, pd.Timestamp('2011-01-01', tz=idx.tz))
result = idx.take([0, 1, 2])
expected = pd.date_range('2011-01-01', '2011-01-03', freq='D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.date_range('2011-01-01', '2011-01-05', freq='2D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.date_range('2011-01-08', '2011-01-02', freq='-3D',
tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = DatetimeIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq=None, tz=idx.tz, name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['A', '2A', '-2A', 'Q', '-1Q', 'M', '-1M', 'D', '3D', '-3D',
'W', '-1W', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.date_range('2011-01-01 09:00:00', freq=freq, periods=10)
result = pd.DatetimeIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestTimedeltaIndexOps(Ops):
def setUp(self):
super(TestTimedeltaIndexOps, self).setUp()
mask = lambda x: isinstance(x, TimedeltaIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ ]
def test_ops_properties(self):
self.check_ops_properties(['days','hours','minutes','seconds','milliseconds'])
self.check_ops_properties(['microseconds','nanoseconds'])
def test_asobject_tolist(self):
idx = timedelta_range(start='1 days', periods=4, freq='D', name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),Timedelta('3 days'),
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = TimedeltaIndex([timedelta(days=1),timedelta(days=2),pd.NaT,
timedelta(days=4)], name='idx')
expected_list = [Timedelta('1 days'),Timedelta('2 days'),pd.NaT,
Timedelta('4 days')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
def test_minmax(self):
# monotonic
idx1 = TimedeltaIndex(['1 days', '2 days', '3 days'])
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = TimedeltaIndex(['1 days', np.nan, '3 days', 'NaT'])
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
            self.assertEqual(idx.min(), Timedelta('1 days'))
            self.assertEqual(idx.max(), Timedelta('3 days'))
for op in ['min', 'max']:
# Return NaT
obj = TimedeltaIndex([])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
obj = TimedeltaIndex([pd.NaT, pd.NaT, pd.NaT])
self.assertTrue(pd.isnull(getattr(obj, op)()))
def test_representation(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex([], dtype='timedelta64[ns]', freq='D')"""
exp2 = """TimedeltaIndex(['1 days'], dtype='timedelta64[ns]', freq='D')"""
exp3 = """TimedeltaIndex(['1 days', '2 days'], dtype='timedelta64[ns]', freq='D')"""
exp4 = """TimedeltaIndex(['1 days', '2 days', '3 days'], dtype='timedelta64[ns]', freq='D')"""
exp5 = """TimedeltaIndex(['1 days 00:00:01', '2 days 00:00:00', '3 days 00:00:00'], dtype='timedelta64[ns]', freq=None)"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """Series([], dtype: timedelta64[ns])"""
exp2 = """0 1 days
dtype: timedelta64[ns]"""
exp3 = """0 1 days
1 2 days
dtype: timedelta64[ns]"""
exp4 = """0 1 days
1 2 days
2 3 days
dtype: timedelta64[ns]"""
exp5 = """0 1 days 00:00:01
1 2 days 00:00:00
2 3 days 00:00:00
dtype: timedelta64[ns]"""
with pd.option_context('display.width',300):
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = TimedeltaIndex([], freq='D')
idx2 = TimedeltaIndex(['1 days'], freq='D')
idx3 = TimedeltaIndex(['1 days', '2 days'], freq='D')
idx4 = TimedeltaIndex(['1 days', '2 days', '3 days'], freq='D')
idx5 = TimedeltaIndex(['1 days 00:00:01', '2 days', '3 days'])
exp1 = """TimedeltaIndex: 0 entries
Freq: D"""
exp2 = """TimedeltaIndex: 1 entries, 1 days to 1 days
Freq: D"""
exp3 = """TimedeltaIndex: 2 entries, 1 days to 2 days
Freq: D"""
exp4 = """TimedeltaIndex: 3 entries, 1 days to 3 days
Freq: D"""
exp5 = """TimedeltaIndex: 3 entries, 1 days 00:00:01 to 3 days 00:00:00"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5],
[exp1, exp2, exp3, exp4, exp5]):
result = idx.summary()
self.assertEqual(result, expected)
def test_add_iadd(self):
# only test adding/sub offsets as + is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng + delta
expected = timedelta_range('1 days 02:00:00','10 days 02:00:00',freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng + 1
expected = timedelta_range('1 days 10:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# only test adding/sub offsets as - is now numeric
# offset
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
for delta in offsets:
rng = timedelta_range('1 days','10 days')
result = rng - delta
expected = timedelta_range('0 days 22:00:00', '9 days 22:00:00')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
# int
rng = timedelta_range('1 days 09:00:00', freq='H', periods=10)
result = rng - 1
expected = timedelta_range('1 days 08:00:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_ops_compat(self):
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
Timedelta(hours=2)]
rng = timedelta_range('1 days','10 days',name='foo')
# multiply
for offset in offsets:
self.assertRaises(TypeError, lambda : rng * offset)
# divide
expected = Int64Index((np.arange(10)+1)*12,name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
# divide with nats
rng = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
expected = Float64Index([12, np.nan, 24], name='foo')
for offset in offsets:
result = rng / offset
tm.assert_index_equal(result,expected)
        # don't allow division by NaT (maybe we could in the future)
self.assertRaises(TypeError, lambda : rng / pd.NaT)
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
self.assertRaises(TypeError, lambda : tdi - dt)
self.assertRaises(TypeError, lambda : tdi - dti)
self.assertRaises(TypeError, lambda : td - dt)
self.assertRaises(TypeError, lambda : td - dti)
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = date_range('20130101',periods=3)
ts = Timestamp('20130101')
dt = ts.to_datetime()
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_datetime()
td = Timedelta('1 days')
def _check(result, expected):
self.assertEqual(result,expected)
self.assertIsInstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
self.assertRaises(TypeError, lambda : dt_tz - ts)
self.assertRaises(TypeError, lambda : dt_tz - dt)
self.assertRaises(TypeError, lambda : dt_tz - ts_tz2)
self.assertRaises(TypeError, lambda : dt - dt_tz)
self.assertRaises(TypeError, lambda : ts - dt_tz)
self.assertRaises(TypeError, lambda : ts_tz2 - ts)
self.assertRaises(TypeError, lambda : ts_tz2 - dt)
self.assertRaises(TypeError, lambda : ts_tz - ts_tz2)
# with dti
self.assertRaises(TypeError, lambda : dti - ts_tz)
self.assertRaises(TypeError, lambda : dti_tz - ts)
self.assertRaises(TypeError, lambda : dti_tz - ts_tz2)
result = dti_tz-dt_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = dt_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = dti_tz-ts_tz
expected = TimedeltaIndex(['0 days','1 days','2 days'])
tm.assert_index_equal(result,expected)
result = ts_tz-dti_tz
expected = TimedeltaIndex(['0 days','-1 days','-2 days'])
tm.assert_index_equal(result,expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(['20121231','20130101','20130102'],tz='US/Eastern')
tm.assert_index_equal(result,expected)
def test_dti_dti_deprecated_ops(self):
# deprecated in 0.16.0 (GH9094)
# change to return subtraction -> TimeDeltaIndex in 0.17.0
        # should move to the appropriate sections above
dti = date_range('20130101',periods=3)
dti_tz = date_range('20130101',periods=3).tz_localize('US/Eastern')
with tm.assert_produces_warning(FutureWarning):
result = dti-dti
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti+dti
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti_tz
expected = Index([])
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz+dti_tz
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti_tz-dti
expected = dti_tz
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
result = dti-dti_tz
expected = dti
tm.assert_index_equal(result,expected)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti_tz+dti)
with tm.assert_produces_warning(FutureWarning):
self.assertRaises(TypeError, lambda : dti+dti_tz)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '4 days'], name='foo')
tm.assert_index_equal(result, expected)
result = dti - tdi # name will be reset
expected = DatetimeIndex(['20121231', pd.NaT, '20130101'])
tm.assert_index_equal(result, expected)
def test_addition_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
result = tdi + dt
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = dt + tdi
expected = DatetimeIndex(['20130102', pd.NaT, '20130103'], name='foo')
tm.assert_index_equal(result, expected)
result = td + tdi
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
result = tdi + td
expected = TimedeltaIndex(['2 days', pd.NaT, '3 days'], name='foo')
tm.assert_index_equal(result, expected)
# unequal length
self.assertRaises(ValueError, lambda : tdi + dti[0:1])
self.assertRaises(ValueError, lambda : tdi[0:1] + dti)
# random indexes
self.assertRaises(TypeError, lambda : tdi + Int64Index([1,2,3]))
# this is a union!
#self.assertRaises(TypeError, lambda : Int64Index([1,2,3]) + tdi)
result = tdi + dti # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dti + tdi # name will be reset
expected = DatetimeIndex(['20130102', pd.NaT, '20130105'])
tm.assert_index_equal(result, expected)
result = dt + td
expected = Timestamp('20130102')
self.assertEqual(result, expected)
result = td + dt
expected = Timestamp('20130102')
self.assertEqual(result, expected)
def test_value_counts_unique(self):
# GH 7735
idx = timedelta_range('1 days 09:00:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = TimedeltaIndex(np.repeat(idx.values, range(1, len(idx) + 1)))
exp_idx = timedelta_range('1 days 18:00:00', freq='-1H', periods=10)
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = timedelta_range('1 days 09:00:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = TimedeltaIndex(['1 days 09:00:00', '1 days 09:00:00', '1 days 09:00:00',
'1 days 08:00:00', '1 days 08:00:00', pd.NaT])
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00'])
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = TimedeltaIndex(['1 days 09:00:00', '1 days 08:00:00', pd.NaT])
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_nonunique_contains(self):
# GH 9512
for idx in map(TimedeltaIndex, ([0, 1, 0], [0, 0, -1], [0, -1, -1],
['00:01:00', '00:01:00', '00:02:00'],
['00:01:00', '00:01:00', '00:00:01'])):
tm.assertIn(idx[0], idx)
def test_unknown_attribute(self):
#GH 9680
tdi = pd.timedelta_range(start=0,periods=10,freq='1s')
ts = pd.Series(np.random.normal(size=10),index=tdi)
self.assertNotIn('foo',ts.__dict__.keys())
self.assertRaises(AttributeError,lambda : ts.foo)
def test_order(self):
#GH 10295
idx1 = TimedeltaIndex(['1 day', '2 day', '3 day'], freq='D', name='idx')
idx2 = TimedeltaIndex(['1 hour', '2 hour', '3 hour'], freq='H', name='idx')
for idx in [idx1, idx2]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq.n, -1)
idx1 = TimedeltaIndex(['1 hour', '3 hour', '5 hour',
'2 hour ', '1 hour'], name='idx1')
exp1 = TimedeltaIndex(['1 hour', '1 hour', '2 hour',
'3 hour', '5 hour'], name='idx1')
idx2 = TimedeltaIndex(['1 day', '3 day', '5 day',
'2 day', '1 day'], name='idx2')
exp2 = TimedeltaIndex(['1 day', '1 day', '2 day',
'3 day', '5 day'], name='idx2')
idx3 = TimedeltaIndex([pd.NaT, '3 minute', '5 minute',
'2 minute', pd.NaT], name='idx3')
exp3 = TimedeltaIndex([pd.NaT, pd.NaT, '2 minute', '3 minute',
'5 minute'], name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertIsNone(ordered.freq)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertIsNone(ordered.freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertIsNone(ordered.freq)
def test_getitem(self):
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx[0:5]
expected = pd.timedelta_range('1 day', '5 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[0:10:2]
expected = pd.timedelta_range('1 day', '9 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[-20:-5:3]
expected = pd.timedelta_range('12 day', '24 day', freq='3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx[4::-1]
expected = TimedeltaIndex(['5 day', '4 day', '3 day', '2 day', '1 day'],
freq='-1D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx)
self.assertIsNone(idx_dup.freq) # freq is reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertIsNone(result.freq)
def test_take(self):
#GH 10295
idx1 = pd.timedelta_range('1 day', '31 day', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Timedelta('1 day'))
result = idx.take([-1])
self.assertEqual(result, pd.Timedelta('31 day'))
result = idx.take([0, 1, 2])
expected = pd.timedelta_range('1 day', '3 day', freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.timedelta_range('1 day', '5 day', freq='2D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([7, 4, 1])
expected = pd.timedelta_range('8 day', '2 day', freq='-3D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
result = idx.take([3, 2, 5])
expected = TimedeltaIndex(['4 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
result = idx.take([-3, 2, 5])
expected = TimedeltaIndex(['29 day', '3 day', '6 day'], name='idx')
self.assert_index_equal(result, expected)
self.assertIsNone(result.freq)
def test_infer_freq(self):
# GH 11018
for freq in ['D', '3D', '-3D', 'H', '2H', '-2H', 'T', '2T', 'S', '-3S']:
idx = pd.timedelta_range('1', freq=freq, periods=10)
result = pd.TimedeltaIndex(idx.asi8, freq='infer')
tm.assert_index_equal(idx, result)
self.assertEqual(result.freq, freq)
class TestPeriodIndexOps(Ops):
def setUp(self):
super(TestPeriodIndexOps, self).setUp()
mask = lambda x: isinstance(x, DatetimeIndex) or isinstance(x, PeriodIndex)
self.is_valid_objs = [ o for o in self.objs if mask(o) ]
self.not_valid_objs = [ o for o in self.objs if not mask(o) ]
def test_ops_properties(self):
self.check_ops_properties(['year','month','day','hour','minute','second','weekofyear','week','dayofweek','dayofyear','quarter'])
self.check_ops_properties(['qyear'], lambda x: isinstance(x,PeriodIndex))
def test_asobject_tolist(self):
idx = pd.period_range(start='2013-01-01', periods=4, freq='M', name='idx')
expected_list = [pd.Period('2013-01-31', freq='M'), pd.Period('2013-02-28', freq='M'),
pd.Period('2013-03-31', freq='M'), pd.Period('2013-04-30', freq='M')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
self.assertTrue(result.equals(expected))
self.assertEqual(result.name, expected.name)
self.assertEqual(idx.tolist(), expected_list)
idx = PeriodIndex(['2013-01-01', '2013-01-02', 'NaT', '2013-01-04'], freq='D', name='idx')
expected_list = [pd.Period('2013-01-01', freq='D'), pd.Period('2013-01-02', freq='D'),
pd.Period('NaT', freq='D'), pd.Period('2013-01-04', freq='D')]
expected = pd.Index(expected_list, dtype=object, name='idx')
result = idx.asobject
self.assertTrue(isinstance(result, Index))
self.assertEqual(result.dtype, object)
        for i in [0, 1, 3]:
            self.assertEqual(result[i], expected[i])
        self.assertEqual(result[2].ordinal, pd.tslib.iNaT)
        self.assertEqual(result[2].freq, 'D')
        self.assertEqual(result.name, expected.name)
        result_list = idx.tolist()
        for i in [0, 1, 3]:
            self.assertEqual(result_list[i], expected_list[i])
        self.assertEqual(result_list[2].ordinal, pd.tslib.iNaT)
        self.assertEqual(result_list[2].freq, 'D')
def test_minmax(self):
# monotonic
idx1 = pd.PeriodIndex([pd.NaT, '2011-01-01', '2011-01-02',
'2011-01-03'], freq='D')
self.assertTrue(idx1.is_monotonic)
# non-monotonic
idx2 = pd.PeriodIndex(['2011-01-01', pd.NaT, '2011-01-03',
'2011-01-02', pd.NaT], freq='D')
self.assertFalse(idx2.is_monotonic)
for idx in [idx1, idx2]:
self.assertEqual(idx.min(), pd.Period('2011-01-01', freq='D'))
self.assertEqual(idx.max(), pd.Period('2011-01-03', freq='D'))
for op in ['min', 'max']:
# Return NaT
obj = PeriodIndex([], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
obj = PeriodIndex([pd.NaT, pd.NaT, pd.NaT], freq='M')
result = getattr(obj, op)()
self.assertEqual(result.ordinal, tslib.iNaT)
self.assertEqual(result.freq, 'M')
def test_representation(self):
# GH 7601
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex([], dtype='int64', freq='D')"""
exp2 = """PeriodIndex(['2011-01-01'], dtype='int64', freq='D')"""
exp3 = """PeriodIndex(['2011-01-01', '2011-01-02'], dtype='int64', freq='D')"""
exp4 = """PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], dtype='int64', freq='D')"""
exp5 = """PeriodIndex(['2011', '2012', '2013'], dtype='int64', freq='A-DEC')"""
exp6 = """PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], dtype='int64', freq='H')"""
exp7 = """PeriodIndex(['2013Q1'], dtype='int64', freq='Q-DEC')"""
exp8 = """PeriodIndex(['2013Q1', '2013Q2'], dtype='int64', freq='Q-DEC')"""
exp9 = """PeriodIndex(['2013Q1', '2013Q2', '2013Q3'], dtype='int64', freq='Q-DEC')"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
for func in ['__repr__', '__unicode__', '__str__']:
result = getattr(idx, func)()
self.assertEqual(result, expected)
def test_representation_to_series(self):
# GH 10971
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """Series([], dtype: object)"""
exp2 = """0 2011-01-01
dtype: object"""
exp3 = """0 2011-01-01
1 2011-01-02
dtype: object"""
exp4 = """0 2011-01-01
1 2011-01-02
2 2011-01-03
dtype: object"""
exp5 = """0 2011
1 2012
2 2013
dtype: object"""
exp6 = """0 2011-01-01 09:00
1 2012-02-01 10:00
2 NaT
dtype: object"""
exp7 = """0 2013Q1
dtype: object"""
exp8 = """0 2013Q1
1 2013Q2
dtype: object"""
exp9 = """0 2013Q1
1 2013Q2
2 2013Q3
dtype: object"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = repr(pd.Series(idx))
self.assertEqual(result, expected)
def test_summary(self):
# GH9116
idx1 = PeriodIndex([], freq='D')
idx2 = PeriodIndex(['2011-01-01'], freq='D')
idx3 = PeriodIndex(['2011-01-01', '2011-01-02'], freq='D')
idx4 = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'], freq='D')
idx5 = PeriodIndex(['2011', '2012', '2013'], freq='A')
idx6 = PeriodIndex(['2011-01-01 09:00', '2012-02-01 10:00', 'NaT'], freq='H')
idx7 = pd.period_range('2013Q1', periods=1, freq="Q")
idx8 = pd.period_range('2013Q1', periods=2, freq="Q")
idx9 = pd.period_range('2013Q1', periods=3, freq="Q")
exp1 = """PeriodIndex: 0 entries
Freq: D"""
exp2 = """PeriodIndex: 1 entries, 2011-01-01 to 2011-01-01
Freq: D"""
exp3 = """PeriodIndex: 2 entries, 2011-01-01 to 2011-01-02
Freq: D"""
exp4 = """PeriodIndex: 3 entries, 2011-01-01 to 2011-01-03
Freq: D"""
exp5 = """PeriodIndex: 3 entries, 2011 to 2013
Freq: A-DEC"""
exp6 = """PeriodIndex: 3 entries, 2011-01-01 09:00 to NaT
Freq: H"""
exp7 = """PeriodIndex: 1 entries, 2013Q1 to 2013Q1
Freq: Q-DEC"""
exp8 = """PeriodIndex: 2 entries, 2013Q1 to 2013Q2
Freq: Q-DEC"""
exp9 = """PeriodIndex: 3 entries, 2013Q1 to 2013Q3
Freq: Q-DEC"""
for idx, expected in zip([idx1, idx2, idx3, idx4, idx5, idx6, idx7, idx8, idx9],
[exp1, exp2, exp3, exp4, exp5, exp6, exp7, exp8, exp9]):
result = idx.summary()
self.assertEqual(result, expected)
def test_resolution(self):
for freq, expected in zip(['A', 'Q', 'M', 'D', 'H', 'T', 'S', 'L', 'U'],
['day', 'day', 'day', 'day',
'hour', 'minute', 'second', 'millisecond', 'microsecond']):
idx = pd.period_range(start='2013-04-01', periods=30, freq=freq)
self.assertEqual(idx.resolution, expected)
def test_add_iadd(self):
# union
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=10)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=8)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = pd.PeriodIndex(['2000-01-01 09:00', '2000-01-01 10:00',
'2000-01-01 11:00', '2000-01-01 12:00',
'2000-01-01 13:00', '2000-01-02 09:00',
'2000-01-02 10:00', '2000-01-02 11:00',
'2000-01-02 12:00', '2000-01-02 13:00'],
freq='H')
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
        other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05',
                                 '2000-01-01 09:08'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05', '2000-01-01 09:08'],
freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=10)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('1998-01-01', freq='A', periods=10)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7)]:
# GH9094
with tm.assert_produces_warning(FutureWarning):
result_add = rng + other
result_union = rng.union(other)
tm.assert_index_equal(result_add, expected)
tm.assert_index_equal(result_union, expected)
# GH 6527
# GH9094
with tm.assert_produces_warning(FutureWarning):
rng += other
tm.assert_index_equal(rng, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng + pd.offsets.YearEnd(5)
expected = pd.period_range('2019', '2029', freq='A')
tm.assert_index_equal(result, expected)
rng += pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, 'Input has different freq from Period'):
rng + o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng + pd.offsets.MonthEnd(5)
expected = pd.period_range('2014-06', '2017-05', freq='M')
tm.assert_index_equal(result, expected)
rng += pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365), Timedelta(days=365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3),
np.timedelta64(72, 'h'), Timedelta('72:00:00')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng + delta
expected = pd.period_range('2014-05-04', '2014-05-18', freq='D')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23), Timedelta('23:00:00')]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng + o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120),
np.timedelta64(120, 'm'), Timedelta(minutes=120)]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng + delta
expected = pd.period_range('2014-01-01 12:00', '2014-01-05 12:00', freq='H')
tm.assert_index_equal(result, expected)
rng += delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30),
np.timedelta64(30, 's'), Timedelta(seconds=30)]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(ValueError, msg):
result = rng + delta
with tm.assertRaisesRegexp(ValueError, msg):
rng += delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng + 1
expected = pd.period_range('2000-01-01 10:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng += 1
tm.assert_index_equal(rng, expected)
def test_sub_isub(self):
# diff
rng1 = pd.period_range('1/1/2000', freq='D', periods=5)
other1 = pd.period_range('1/6/2000', freq='D', periods=5)
expected1 = pd.period_range('1/1/2000', freq='D', periods=5)
rng2 = pd.period_range('1/1/2000', freq='D', periods=5)
other2 = pd.period_range('1/4/2000', freq='D', periods=5)
expected2 = pd.period_range('1/1/2000', freq='D', periods=3)
rng3 = pd.period_range('1/1/2000', freq='D', periods=5)
other3 = pd.PeriodIndex([], freq='D')
expected3 = pd.period_range('1/1/2000', freq='D', periods=5)
rng4 = pd.period_range('2000-01-01 09:00', freq='H', periods=5)
other4 = pd.period_range('2000-01-02 09:00', freq='H', periods=5)
expected4 = rng4
rng5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:03',
'2000-01-01 09:05'], freq='T')
other5 = pd.PeriodIndex(['2000-01-01 09:01', '2000-01-01 09:05'], freq='T')
expected5 = pd.PeriodIndex(['2000-01-01 09:03'], freq='T')
rng6 = pd.period_range('2000-01-01', freq='M', periods=7)
other6 = pd.period_range('2000-04-01', freq='M', periods=7)
expected6 = pd.period_range('2000-01-01', freq='M', periods=3)
rng7 = pd.period_range('2003-01-01', freq='A', periods=5)
other7 = pd.period_range('1998-01-01', freq='A', periods=8)
expected7 = pd.period_range('2006-01-01', freq='A', periods=2)
for rng, other, expected in [(rng1, other1, expected1), (rng2, other2, expected2),
(rng3, other3, expected3), (rng4, other4, expected4),
(rng5, other5, expected5), (rng6, other6, expected6),
(rng7, other7, expected7),]:
result_union = rng.difference(other)
tm.assert_index_equal(result_union, expected)
# offset
# DateOffset
rng = pd.period_range('2014', '2024', freq='A')
result = rng - pd.offsets.YearEnd(5)
expected = pd.period_range('2009', '2019', freq='A')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.YearEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014', '2024', freq='A')
msg = 'Input has different freq from PeriodIndex\\(freq=A-DEC\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
rng = pd.period_range('2014-01', '2016-12', freq='M')
result = rng - pd.offsets.MonthEnd(5)
expected = pd.period_range('2013-08', '2016-07', freq='M')
tm.assert_index_equal(result, expected)
rng -= pd.offsets.MonthEnd(5)
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(365, 'D'), timedelta(365)]:
rng = pd.period_range('2014-01', '2016-12', freq='M')
msg = 'Input has different freq from PeriodIndex\\(freq=M\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
# Tick
offsets = [pd.offsets.Day(3), timedelta(days=3), np.timedelta64(3, 'D'),
pd.offsets.Hour(72), timedelta(minutes=60*24*3), np.timedelta64(72, 'h')]
for delta in offsets:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
result = rng - delta
expected = pd.period_range('2014-04-28', '2014-05-12', freq='D')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for o in [pd.offsets.YearBegin(2), pd.offsets.MonthBegin(1), pd.offsets.Minute(),
np.timedelta64(4, 'h'), timedelta(hours=23)]:
rng = pd.period_range('2014-05-01', '2014-05-15', freq='D')
msg = 'Input has different freq from PeriodIndex\\(freq=D\\)'
with tm.assertRaisesRegexp(ValueError, msg):
rng - o
offsets = [pd.offsets.Hour(2), timedelta(hours=2), np.timedelta64(2, 'h'),
pd.offsets.Minute(120), timedelta(minutes=120), np.timedelta64(120, 'm')]
for delta in offsets:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
result = rng - delta
expected = pd.period_range('2014-01-01 08:00', '2014-01-05 08:00', freq='H')
tm.assert_index_equal(result, expected)
rng -= delta
tm.assert_index_equal(rng, expected)
for delta in [pd.offsets.YearBegin(2), timedelta(minutes=30), np.timedelta64(30, 's')]:
rng = pd.period_range('2014-01-01 10:00', '2014-01-05 10:00', freq='H')
msg = 'Input has different freq from PeriodIndex\\(freq=H\\)'
with tm.assertRaisesRegexp(ValueError, msg):
                result = rng - delta
            with tm.assertRaisesRegexp(ValueError, msg):
                rng -= delta
# int
rng = pd.period_range('2000-01-01 09:00', freq='H', periods=10)
result = rng - 1
expected = pd.period_range('2000-01-01 08:00', freq='H', periods=10)
tm.assert_index_equal(result, expected)
rng -= 1
tm.assert_index_equal(rng, expected)
def test_value_counts_unique(self):
# GH 7735
idx = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
# create repeated values, 'n'th element is repeated by n+1 times
idx = PeriodIndex(np.repeat(idx.values, range(1, len(idx) + 1)), freq='H')
exp_idx = PeriodIndex(['2011-01-01 18:00', '2011-01-01 17:00', '2011-01-01 16:00',
'2011-01-01 15:00', '2011-01-01 14:00', '2011-01-01 13:00',
'2011-01-01 12:00', '2011-01-01 11:00', '2011-01-01 10:00',
'2011-01-01 09:00'], freq='H')
expected = Series(range(10, 0, -1), index=exp_idx, dtype='int64')
tm.assert_series_equal(idx.value_counts(), expected)
expected = pd.period_range('2011-01-01 09:00', freq='H', periods=10)
tm.assert_index_equal(idx.unique(), expected)
idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 09:00', '2013-01-01 09:00',
'2013-01-01 08:00', '2013-01-01 08:00', pd.NaT], freq='H')
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00'], freq='H')
expected = Series([3, 2], index=exp_idx)
tm.assert_series_equal(idx.value_counts(), expected)
exp_idx = PeriodIndex(['2013-01-01 09:00', '2013-01-01 08:00', pd.NaT], freq='H')
expected = Series([3, 2, 1], index=exp_idx)
tm.assert_series_equal(idx.value_counts(dropna=False), expected)
tm.assert_index_equal(idx.unique(), exp_idx)
def test_drop_duplicates_metadata(self):
#GH 10115
idx = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
result = idx.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
idx_dup = idx.append(idx) # freq will not be reset
result = idx_dup.drop_duplicates()
self.assert_index_equal(idx, result)
self.assertEqual(idx.freq, result.freq)
def test_order_compat(self):
def _check_freq(index, expected_index):
if isinstance(index, PeriodIndex):
self.assertEqual(index.freq, expected_index.freq)
pidx = PeriodIndex(['2011', '2012', '2013'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2012, 2013], name='idx')
for idx in [pidx, iidx]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, idx[::-1])
_check_freq(ordered, idx[::-1])
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, idx[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
_check_freq(ordered, idx[::-1])
pidx = PeriodIndex(['2011', '2013', '2015', '2012', '2011'], name='pidx', freq='A')
pexpected = PeriodIndex(['2011', '2011', '2012', '2013', '2015'], name='pidx', freq='A')
# for compatibility check
iidx = Index([2011, 2013, 2015, 2012, 2011], name='idx')
iexpected = Index([2011, 2011, 2012, 2013, 2015], name='idx')
for idx, expected in [(pidx, pexpected), (iidx, iexpected)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
_check_freq(ordered, idx)
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
_check_freq(ordered, idx)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
_check_freq(ordered, idx)
pidx = PeriodIndex(['2011', '2013', 'NaT', '2011'], name='pidx', freq='D')
result = pidx.sort_values()
expected = PeriodIndex(['NaT', '2011', '2011', '2013'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
result = pidx.sort_values(ascending=False)
expected = PeriodIndex(['2013', '2011', '2011', 'NaT'], name='pidx', freq='D')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
def test_order(self):
for freq in ['D', '2D', '4D']:
idx = PeriodIndex(['2011-01-01', '2011-01-02', '2011-01-03'],
freq=freq, name='idx')
ordered = idx.sort_values()
self.assert_index_equal(ordered, idx)
self.assertEqual(ordered.freq, idx.freq)
ordered = idx.sort_values(ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, idx)
self.assert_numpy_array_equal(indexer, np.array([0, 1, 2]))
self.assertEqual(ordered.freq, idx.freq)
self.assertEqual(ordered.freq, freq)
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
expected = idx[::-1]
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([2, 1, 0]))
self.assertEqual(ordered.freq, expected.freq)
self.assertEqual(ordered.freq, freq)
idx1 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'], freq='D', name='idx1')
exp1 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'], freq='D', name='idx1')
idx2 = PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-02', '2011-01-01'],
freq='D', name='idx2')
exp2 = PeriodIndex(['2011-01-01', '2011-01-01', '2011-01-02',
'2011-01-03', '2011-01-05'],
freq='D', name='idx2')
idx3 = PeriodIndex([pd.NaT, '2011-01-03', '2011-01-05',
'2011-01-02', pd.NaT], freq='D', name='idx3')
exp3 = PeriodIndex([pd.NaT, pd.NaT, '2011-01-02', '2011-01-03',
'2011-01-05'], freq='D', name='idx3')
        for idx, expected in [(idx1, exp1), (idx2, exp2), (idx3, exp3)]:
ordered = idx.sort_values()
self.assert_index_equal(ordered, expected)
self.assertEqual(ordered.freq, 'D')
ordered = idx.sort_values(ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True)
self.assert_index_equal(ordered, expected)
self.assert_numpy_array_equal(indexer, np.array([0, 4, 3, 1, 2]))
self.assertEqual(ordered.freq, 'D')
ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)
self.assert_index_equal(ordered, expected[::-1])
self.assert_numpy_array_equal(indexer, np.array([2, 1, 3, 4, 0]))
self.assertEqual(ordered.freq, 'D')
def test_getitem(self):
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx[0]
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx[-1]
self.assertEqual(result, pd.Period('2011-01-31', freq='D'))
result = idx[0:5]
expected = pd.period_range('2011-01-01', '2011-01-05', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[0:10:2]
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05',
'2011-01-07', '2011-01-09'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[-20:-5:3]
expected = pd.PeriodIndex(['2011-01-12', '2011-01-15', '2011-01-18',
'2011-01-21', '2011-01-24'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx[4::-1]
expected = PeriodIndex(['2011-01-05', '2011-01-04', '2011-01-03',
'2011-01-02', '2011-01-01'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
def test_take(self):
#GH 10295
idx1 = pd.period_range('2011-01-01', '2011-01-31', freq='D', name='idx')
for idx in [idx1]:
result = idx.take([0])
self.assertEqual(result, pd.Period('2011-01-01', freq='D'))
result = idx.take([5])
self.assertEqual(result, pd.Period('2011-01-06', freq='D'))
result = idx.take([0, 1, 2])
expected = pd.period_range('2011-01-01', '2011-01-03', freq='D',
name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, 'D')
self.assertEqual(result.freq, expected.freq)
result = idx.take([0, 2, 4])
expected = pd.PeriodIndex(['2011-01-01', '2011-01-03', '2011-01-05'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([7, 4, 1])
expected = pd.PeriodIndex(['2011-01-08', '2011-01-05', '2011-01-02'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([3, 2, 5])
expected = PeriodIndex(['2011-01-04', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
result = idx.take([-3, 2, 5])
expected = PeriodIndex(['2011-01-29', '2011-01-03', '2011-01-06'],
freq='D', name='idx')
self.assert_index_equal(result, expected)
self.assertEqual(result.freq, expected.freq)
self.assertEqual(result.freq, 'D')
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
# '--with-coverage', '--cover-package=pandas.core'],
exit=False)
| gpl-2.0 |
maistrovas/My-Courses-Solutions | Coursera Algorithmic Thinking (Part 1)/Module 2/Application/Application2.py | 1 | 7494 | """
Provided code for Application portion of Module 2
Answers 4/6
Application Grade is 13 out of 15
Text Answers
-Question 2:
All three graphs are resilient in this case.
-Question 5:
UPA and ER graphs are still resilient
(UPA is very close to overcoming 25% roughness)
in this type of attack.
"""
# general imports
import urllib2
import random
import timeit
import time
import math
import UPA
from collections import deque
from random import shuffle
import BFS_project as project
import matplotlib.pyplot as plt
import numpy as np
# CodeSkulptor import
#import simpleplot
#import codeskulptor
#codeskulptor.set_timeout(60)
# Desktop imports
#import matplotlib.pyplot as plt
############################################
# Provided code
def copy_graph(graph):
"""
Make a copy of a graph
"""
new_graph = {}
for node in graph:
new_graph[node] = set(graph[node])
return new_graph
def delete_node(ugraph, node):
"""
Delete a node from an undirected graph
"""
neighbors = ugraph[node]
ugraph.pop(node)
for neighbor in neighbors:
ugraph[neighbor].remove(node)
def targeted_order(ugraph):
"""
Compute a targeted attack order consisting
of nodes of maximal degree
Returns:
A list of nodes
"""
# copy the graph
new_graph = copy_graph(ugraph)
order = []
while len(new_graph) > 0:
max_degree = -1
for node in new_graph:
if len(new_graph[node]) > max_degree:
max_degree = len(new_graph[node])
max_degree_node = node
neighbors = new_graph[max_degree_node]
new_graph.pop(max_degree_node)
for neighbor in neighbors:
new_graph[neighbor].remove(max_degree_node)
order.append(max_degree_node)
return order
##########################################################
# Code for loading computer network graph
NETWORK_URL = "http://storage.googleapis.com/codeskulptor-alg/alg_rf7.txt"
def load_graph(graph_url):
"""
Function that loads a graph given the URL
for a text representation of the graph
Returns a dictionary that models a graph
"""
graph_file = urllib2.urlopen(graph_url)
graph_text = graph_file.read()
graph_lines = graph_text.split('\n')
graph_lines = graph_lines[ : -1]
print "Loaded graph with", len(graph_lines), "nodes"
counter = 0
answer_graph = {}
for line in graph_lines:
neighbors = line.split(' ')
node = int(neighbors[0])
answer_graph[node] = set([])
for neighbor in neighbors[1 : -1]:
counter +=1
answer_graph[node].add(int(neighbor))
print 'Number network edges = ', counter / 2
return answer_graph
def er_graph(n, p):
'''
implementation of ER algorithm
n - final number of nodes
p - probability
'''
graph = {key: set() for key in xrange(n)}
counter = 0
for i in xrange(n):
for j in xrange(n):
if i == j:
continue
if random.random() < p:
counter += 1
graph[i].add(j)
graph[j].add(i)
print 'Number of ER-edges=', counter
return graph
##UPA-Algorithm
def algorithm_upa(n, m):
'''
implementation of UPA algorithm
n - final number of nodes
m - number of existing nodes
p - probability for er_graph
'''
graph = er_graph(m, 1)
upa = UPA.UPATrial(m)
counter = 0
for i in xrange(m, n):
new_edges = upa.run_trial(m)
graph[i] = new_edges
for node in new_edges:
graph[node].add(i)
return graph
def random_order(graph):
'''
takes a graph and returns a list
of the nodes in the graph in some random order
'''
result = deque()
for node in graph:
result.append(node)
shuffle(result)
return result
loaded_graph = load_graph(NETWORK_URL)
er_ggraph = er_graph(1239, 0.004)
upa_graph = algorithm_upa(1239, 3)
def count_Uedges(ugraph):
'''
count edges in the graph
'''
counter = 0
for i in ugraph:
for j in ugraph[i]:
counter +=1
return counter/2
# print 'UPA edges = ', count_Uedges(upa_graph)
# print 'ER edges =', count_Uedges(er_ggraph)
# print 'Network graph edges =', count_Uedges(loaded_graph)
def plotting(net_g, er_g, upa_g, question):
"""
    Plot the resilience curves of the three graphs with legends:
    x - number of nodes removed
    y - size of the largest connected component
    in the graphs resulting from the node removal.
"""
if question == 1:
print 'The function plots question 1'
network_order = random_order(net_g)
er_order = random_order(er_g)
upa_order = random_order(upa_g)
if question == 4:
print 'The function plots question 4'
network_order = targeted_order(net_g)
er_order = targeted_order(er_g)
upa_order = targeted_order(upa_g)
network_resil = project.compute_resilience(net_g, network_order)
er_resil = project.compute_resilience(er_g, er_order)
upa_resil = project.compute_resilience(upa_g, upa_order)
xvals_net = np.array([node for node in range(len(network_order) +1 )])
xvals_er = np.array([node for node in range(len(er_order) +1 )])
xvals_upa = np.array([node for node in range(len(upa_order) +1 )])
yvals_net = np.array(network_resil)
yvals_er = np.array(er_resil)
yvals_upa = np.array(upa_resil)
plt.figure('Application2 Plot')
plt.title('Resilience comparison')
plt.xlabel('Removed nodes')
plt.ylabel('Largest conected component')
plt.plot(xvals_net, yvals_net, '-b', label='Network-Data')
plt.plot(xvals_er, yvals_er, '-r', label='ER-Algorithm (p = 0.004)')
plt.plot(xvals_upa, yvals_upa, '-g', label='UPA-Algorithm (m = 3)')
plt.legend(loc='upper right')
plt.show()
'''
Questions 1,4
'''
plotting(loaded_graph, er_ggraph, upa_graph, 1)
#plotting(loaded_graph, er_ggraph, upa_graph, 4)
def measure_targeted_order(n, m, func):
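    '''
    Build a UPA graph with n nodes (m edges per new node) and return the
    time in seconds taken by a single call of func on that graph.
    '''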
graph = algorithm_upa(n, m)
return timeit.timeit(lambda: func(graph), number=1)
def fast_targeted_order(ugraph):
'''
    Compute a targeted attack order (highest-degree nodes first) in
    O(n + m) time, using buckets of nodes grouped by degree.
'''
ugraph = copy_graph(ugraph)
N = len(ugraph)
    degree_sets = [set() for _ in range(N)]  # independent bucket per degree
for node, neighbors in ugraph.iteritems():
degree = len(neighbors)
degree_sets[degree].add(node)
order = []
for k in range(N - 1, -1, -1):
while degree_sets[k]:
u = degree_sets[k].pop()
for neighbor in ugraph[u]:
d = len(ugraph[neighbor])
degree_sets[d].remove(neighbor)
degree_sets[d - 1].add(neighbor)
order.append(u)
delete_node(ugraph, u)
return order
def question3():
'''
Function plotting Question 3
'''
xs = range(10, 1000, 10)
m = 5
    ys_targeted = [measure_targeted_order(n, m, targeted_order) for n in xs]
    ys_fast_targeted = [measure_targeted_order(n, m, fast_targeted_order) for n in xs]
    plt.plot(xs, ys_targeted, '-r', label='targeted_order')
plt.plot(xs, ys_fast_targeted, '-b', label='fast_targeted_order')
plt.title('Targeted order functions performance (desktop Python)')
plt.xlabel('Number of nodes in the graph')
plt.ylabel('Execution time')
plt.legend(loc='upper left')
plt.tight_layout()
plt.show()
'''
Question3
Include only plotting
'''
question3()
| mit |
PatrickOReilly/scikit-learn | examples/linear_model/plot_ridge_path.py | 55 | 2138 | """
===========================================================
Plot Ridge coefficients as a function of the regularization
===========================================================
Shows the effect of collinearity in the coefficients of an estimator.
.. currentmodule:: sklearn.linear_model
:class:`Ridge` Regression is the estimator used in this example.
Each color represents a different feature of the
coefficient vector, and this is displayed as a function of the
regularization parameter.
This example also shows the usefulness of applying Ridge regression
to highly ill-conditioned matrices. For such matrices, a slight
change in the target variable can cause huge variances in the
calculated weights. In such cases, it is useful to set a certain
regularization (alpha) to reduce this variation (noise).
When alpha is very large, the regularization effect dominates the
squared loss function and the coefficients tend to zero.
At the end of the path, as alpha tends toward zero
and the solution tends towards the ordinary least squares, coefficients
exhibit big oscillations. In practice it is necessary to tune alpha
in such a way that a balance is maintained between both.
"""
# Author: Fabian Pedregosa -- <fabian.pedregosa@inria.fr>
# License: BSD 3 clause
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# X is the 10x10 Hilbert matrix
X = 1. / (np.arange(1, 11) + np.arange(0, 10)[:, np.newaxis])
y = np.ones(10)
###############################################################################
# Compute paths
n_alphas = 200
alphas = np.logspace(-10, -2, n_alphas)
clf = linear_model.Ridge(fit_intercept=False)
coefs = []
for a in alphas:
clf.set_params(alpha=a)
clf.fit(X, y)
coefs.append(clf.coef_)
###############################################################################
# Display results
ax = plt.gca()
ax.plot(alphas, coefs)
ax.set_xscale('log')
ax.set_xlim(ax.get_xlim()[::-1]) # reverse axis
plt.xlabel('alpha')
plt.ylabel('weights')
plt.title('Ridge coefficients as a function of the regularization')
plt.axis('tight')
plt.show()
| bsd-3-clause |
franchenstein/master_project | main.py | 1 | 18773 | #!/usr/bin
import probabilisticgraph as pg
import graphgenerator as gg
import dmarkov as dm
import sequenceanalyzer as sa
import yaml
import matplotlib.pyplot as plt
import synchwordfinder as swf
def main(config_file, fsw=False, terminate=False, dmark=False, generate=False, gen_seq=False, an_seq=False, plot=False,
seq_len=10000000, tag='default'):
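    '''
    Dispatch the pipeline steps selected by the boolean flags: find synch
    words, terminate graphs, build D-Markov machines, reconstruct graphs,
    generate sequences, analyze them and plot results, using the parameters
    stored in the YAML config files under configs/<graph_path>/.
    '''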
with open(config_file, 'r') as f:
configs = yaml.load(f)
graph_path = configs['graph_path']
terminations = configs['terminations']
lmax = configs['lmax']
algorithms = configs['algorithms']
lrange = configs['lrange']
alpharange = configs['alpharange']
drange = configs['drange']
test = configs['test']
synch_words = configs['synch_words']
l2range = configs['l2range']
if fsw:
p = 'configs/' + graph_path + '/fsw_params.yaml'
with open(p, 'r') as f:
fsw_params = yaml.load(f)
find_synch_words(graph_path, fsw_params['w'], lmax, fsw_params['alpha'], fsw_params['test'], l2range)
if terminate:
terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test)
if dmark:
generate_dmarkov(graph_path, drange, lmax)
if generate:
seq_path = 'sequences/' + graph_path + '/original_length_' + str(seq_len) + '.yaml'
generate_graphs(algorithms, terminations, lmax, lrange, l2range, alpharange, graph_path, synch_words, test,
seq_path)
if gen_seq:
generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len)
if an_seq:
p = 'configs/' + graph_path + '/params.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
analyze_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len,
params['to_analyze'], params['other_params'])
if plot:
p = 'configs/' + graph_path + '/plotconfigs.yaml'
with open(p, 'r') as f:
params = yaml.load(f)
if params['cond_entropy']:
plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, params['eval_l'], tag)
if params['autocorrelation']:
plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, params['upto'], tag)
if params['kld']:
plot_others('kld', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
if params['l1metric']:
plot_others('l1metric', graph_path, algorithms, terminations, drange, lrange, alpharange, tag)
def find_synch_words(graph_path, w, l, alpha, test, l2range=[1]):
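    '''
    Find the synchronization words of the graph at graph_path with a
    SynchWordFinder and dump them to synch_words/<graph_path>/sw.yaml.
    '''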
s = swf.SynchWordFinder(graph_path, w, l, alpha, test, l2range)
sw = s.find_synch_words()
path = "synch_words/" + graph_path + "/sw.yaml"
with open(path, "w") as f:
yaml.dump(sw, f)
def terminate_graphs(graph_path, terminations, lrange, lmax, alpharange, test):
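    '''
    Expand the last level of the depth-lmax rtp graph for every combination
    of termination criterion, L and alpha, and save the terminated graphs.
    '''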
g = pg.ProbabilisticGraph([], [])
if 'omega_inverted' in terminations:
synch_path = 'synch_words/' + graph_path + '/sw.yaml'
with open(synch_path, 'r') as f:
synch_words = yaml.load(f)
else:
synch_words = []
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = g.expand_last_level(l, t, alpha, test, synch_words)
path = 'graphs/' + graph_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
h.save_graph_file(path)
def generate_graphs(algorithms, terminations, maxl, lrange, l2range, alpharange, save_path, synch_words, test,
seq_path):
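    '''
    Run the selected reconstruction algorithms (mk1, mk2, mk2_moore, mk3 on
    the terminated graphs; crissis on the depth-maxl rtp graph) and save the
    generated graphs.
    '''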
for t in terminations:
for l in lrange:
for alpha in alpharange:
p1 = 'graphs/' + save_path + '/rtp_L' + str(l) + '_alpha' + str(alpha) + '_' + t + '.yaml'
p2 = 'graphs/' + save_path + '/L' + str(l) + '_alpha' + str(alpha) + '_' + t
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
for algo in algorithms:
if algo == 'mk1':
g.mk1(test, alpha, l2range[-1])
elif algo == 'mk2':
g.mk2()
elif algo == 'mk2_moore':
g.mk2_moore(test, alpha, l2range[-1])
elif algo == 'mk3':
g.mk3(test, alpha)
if 'crissis' in algorithms:
p1 = 'graphs/' + save_path + '/rtp_L' + str(maxl) + '.yaml'
for l2 in l2range:
for alpha in alpharange:
p2 = 'graphs/' + save_path + '/L_2_' + str(l2) + '_alpha' + str(alpha)
g = gg.GraphGenerator(p1, synch_words, p2, seq_path)
g.crissis(test, alpha, l2)
def generate_dmarkov(graph_path, drange, lmax):
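    '''
    Build a D-Markov machine from the depth-lmax rtp graph for every D in
    drange and save each one as dmarkov_d<D>.yaml.
    '''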
g = pg.ProbabilisticGraph([], [])
for d in drange:
p = 'graphs/' + graph_path + '/rtp_L' + str(lmax) + '.yaml'
g.open_graph_file(p)
h = dm.DMarkov(g, d)
path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
h.save_graph_file(path)
def generate_sequences(graph_path, algorithms, drange, terminations, lrange, l2range, alpharange, seq_len):
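    '''
    Generate a sequence of length seq_len from every graph produced by the
    selected algorithms (D-Markov, crissis or terminated variants).
    '''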
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
elif algo == 'crissis':
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'graphs/' + graph_path + '/' + p
generate_sequences_core(g, graph_path, path, p, seq_len)
def generate_sequences_core(g, graph_path, path, p, seq_len):
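    '''
    Load the graph stored at path into g, generate a sequence of length
    seq_len from its first state and dump it under sequences/<graph_path>/.
    '''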
g.open_graph_file(path)
seq, v = g.generate_sequence(seq_len, g.states[0])
p = 'sequences/' + graph_path + '/len_' + str(seq_len) + '_' + p
with open(p, 'w') as f:
yaml.dump(seq, f)
def analyze_sequences(graph_path, algorithms, drange, terminations,
lrange, l2range, alpharange, seq_len, to_analyze, params):
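    '''
    Analyze every generated sequence with analyze_sequences_core_1 and save
    the resulting KLD and L1-metric lists per algorithm and termination.
    '''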
for algo in algorithms:
if algo == 'dmark':
kld = []
l1 = []
for d in drange:
p = 'dmarkov_d' + str(d) + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/dmarkov.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/dmarkov.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
elif algo == 'crissis':
kld = []
l1 = []
for l2 in l2range:
for alpha in alpharange:
p = 'L_2_' + str(l2) + '_alpha' + str(alpha) + '_crissis.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/crissis.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/crissis.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
else:
for t in terminations:
kld = []
l1 = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
path = 'sequences/' + graph_path + '/len_' +str(seq_len) + '_' + p
seq_an = sa.SequenceAnalyzer(path)
kld_step, l1_step = analyze_sequences_core_1(graph_path, p, to_analyze, params, seq_an)
kld.append(kld_step)
l1.append(l1_step)
if to_analyze['kld']:
k_path = 'results/' + graph_path + '/kld/' + t + '_' + algo + '.yaml'
with open(k_path, 'w') as f:
yaml.dump(kld, f)
if to_analyze['l1metric']:
l_path = 'results/' + graph_path + '/l1metric/' + t + '_' + algo + '.yaml'
with open(l_path, 'w') as f:
yaml.dump(l1, f)
def analyze_sequences_core_1(graph_path, path, to_analyze, params, seq_an):
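    '''
    Run the analyses enabled in to_analyze (probabilities, conditional
    probabilities, conditional entropy, autocorrelation, KLD, L1-metric) on
    seq_an, saving intermediate results, and return [kld, l1].
    '''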
kld = 0
l1 = 0
if to_analyze['probabilities']:
p, alph = seq_an.calc_probs(params['L'])
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'w') as f:
yaml.dump([p, alph], f)
if to_analyze['cond_probabilities']:
check_probs(seq_an, graph_path, path)
p_cond = seq_an.calc_cond_probs(params['L']-1)
p_cond_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_cond_path, 'w') as f:
yaml.dump(p_cond, f)
if to_analyze['cond_entropy']:
check_probs(seq_an, graph_path, path)
check_cond_probs(seq_an, graph_path, path)
h = seq_an.calc_cond_entropy(params['L']-1)
h_path = 'results/'+ graph_path + '/cond_entropies/' + path
with open(h_path, 'w') as f:
yaml.dump(h, f)
if to_analyze['autocorrelation']:
a = seq_an.calc_autocorrelation(params['upto'])
a_path = 'results/' + graph_path + '/autocorrelations/' + path
with open(a_path, 'w') as f:
yaml.dump(a, f)
if to_analyze['kld']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
kld = seq_an.calc_kldivergence(p, params['K'])
if to_analyze['l1metric']:
check_probs(seq_an, graph_path, path)
p = load_reference_probs(graph_path)
l1 = seq_an.calc_l1metric(p, params['l1'])
return [kld, l1]
def check_probs(seq_an, graph_path, path):
if not seq_an.probabilities:
p_path = 'results/'+ graph_path + '/probabilities/' + path
with open(p_path, 'r') as f:
p, alph = yaml.load(f)
seq_an.probabilities = p
seq_an.alphabet = alph
def check_cond_probs(seq_an, graph_path, path):
if not seq_an.conditional_probabilities:
p_path = 'results/'+ graph_path + '/probabilities/cond_' + path
with open(p_path, 'r') as f:
pcond = yaml.load(f)
seq_an.conditional_probabilities = pcond
def load_reference_probs(graph_path):
path = 'results/' + graph_path + '/probabilities/original.yaml'
with open(path, 'r') as f:
p = yaml.load(f)
return p[0]
def plot_entropies(graph_path, algorithms, terminations, drange, lrange, alpharange, eval_l, tag):
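    '''
    Plot the conditional entropy at length eval_l against the number of
    states of each generated graph, with the original sequence as baseline.
    '''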
path_original = 'results/' + graph_path + '/cond_entropies/original.yaml'
with open(path_original, 'r') as f:
h_original = yaml.load(f)
h_base = h_original[eval_l]
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
h_dmark = []
states_dmark = []
for d in drange:
h_path = 'results/' + graph_path + '/cond_entropies/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_dmark.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
h.append(h_dmark)
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
h_term = []
states_term = []
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/cond_entropies/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h_term.append(h_eval[eval_l])
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
h.append(h_term)
states.append(states_term)
i = 0
for entropy in h:
plt.semilogx(states[i], entropy, marker='o', label = labels[i])
i += 1
plt.axhline(y=h_base, color='k', linewidth = 3, label='Original sequence baseline')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
plt.ylabel('Conditional Entropy')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/cond_entropies_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_others(kind, graph_path, algorithms, terminations, drange, lrange, alpharange, tag):
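    '''
    Plot a previously computed metric (kind is 'kld' or 'l1metric') against
    the number of states of each generated graph and save the figure.
    '''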
h = []
states = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
states_dmark = []
h_path = 'results/' + graph_path + '/' + kind + '/dmarkov.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for d in drange:
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
states_dmark.append(len(g.states))
states.append(states_dmark)
lbl = 'D-Markov, D from ' + str(drange[0]) + ' to ' + str(drange[-1])
labels.append(lbl)
else:
for t in terminations:
states_term = []
h_path = 'results/' + graph_path + '/' + kind + '/' + t + '_' + algo + '.yaml'
with open(h_path, 'r') as f:
h.append(yaml.load(f))
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
states_term.append(len(g.states))
lbl = algo + ', ' + t
labels.append(lbl)
states.append(states_term)
i = 0
for value in h:
print len(value)
print len(states[i])
plt.semilogx(states[i], value, marker='o', label=labels[i])
i += 1
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Number of states')
if kind == 'l1metric':
plt.ylabel('L1-Metric')
elif kind == 'kld':
plt.ylabel('Kullback-Leibler Divergence')
save_path = 'plots/' + graph_path + '/' + kind + '_' + tag + '.png'
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
plt.savefig(save_path, bbox_inches='tight')
plt.show()
def plot_autocorr(graph_path, algorithms, terminations, drange, lrange, alpharange, up_to, tag):
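    '''
    Plot the autocorrelation up to lag up_to of each generated graph against
    the original sequence and save the figure.
    '''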
path_original = 'results/' + graph_path + '/autocorrelations/original.yaml'
with open(path_original, 'r') as f:
h_base = yaml.load(f)
h = []
labels = []
g = pg.ProbabilisticGraph([], [])
for algo in algorithms:
if algo == 'dmark':
for d in drange:
h_path = 'results/' + graph_path + '/autocorrelations/dmarkov_d' + str(d) + '.yaml'
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/dmarkov_d' + str(d) + '.yaml'
g.open_graph_file(g_path)
lbl = 'D-Markov, D = ' + str(d) + ', ' + str(len(g.states)) + ' states'
labels.append(lbl)
else:
for t in terminations:
for l in lrange:
for alpha in alpharange:
p = 'L' + str(l) + '_alpha' + str(alpha) + '_' + t + '_' + algo + '.yaml'
h_path = 'results/' + graph_path + '/autocorrelations/' + p
with open(h_path, 'r') as f:
h_eval = yaml.load(f)
h.append(h_eval)
g_path = 'graphs/' + graph_path + '/' + p
g.open_graph_file(g_path)
lbl = algo + ', ' + t + ', L = ' +str(l) + '. ' + str(len(g.states)) + ' states'
labels.append(lbl)
i = 0
x = range(1, up_to)
for autocorr in h:
plt.plot(x, autocorr[1:up_to], marker='o', label=labels[i])
i += 1
plt.plot(x, h_base[1:up_to], color='k', linewidth=3, label='Original sequence')
plt.legend(loc='upper right', shadow=False, fontsize='medium')
plt.xlabel('Lag')
plt.ylabel('Autocorrelation')
#fig_mngr = plt.get_current_fig_manager()
#fig_mngr.window.showMaximized()
save_path = 'plots/' + graph_path + '/autocorrelations_' + tag + '.png'
plt.savefig(save_path, bbox_inches='tight')
plt.show()
| mit |
zaxliu/deepnap | experiments/kdd-exps/experiment_message_2016-6-11_BUF2_G5_FR100_legacy.py | 1 | 4371 | # System built-in modules
import time
from datetime import datetime
import sys
import os
from multiprocessing import Pool
# Project dependency modules
import pandas as pd
pd.set_option('mode.chained_assignment', None) # block warnings due to DataFrame value assignment
import lasagne
# Project modules
sys.path.append('../')
from sleep_control.traffic_emulator import TrafficEmulator
from sleep_control.traffic_server import TrafficServer
from sleep_control.controller import QController, DummyController, NController
from sleep_control.integration import Emulation
from sleep_control.env_models import SJTUModel
from rl.qtable import QAgent
from rl.qnn_theano import QAgentNN
from rl.mixin import PhiMixin, DynaMixin
sys_stdout = sys.stdout
log_file_name = 'message_2016-6-11_BUF2_G5_FR100.log'
# Composite classes
class Phi_QAgentNN(PhiMixin, QAgentNN):
def __init__(self, **kwargs):
super(Phi_QAgentNN, self).__init__(**kwargs)
# Parameters
# |- Agent
# |- QAgent
actions = [(True, None), (False, 'serve_all')]
gamma, alpha = 0.5, 0.9
explore_strategy, epsilon = 'epsilon', 0.02 # exploration
# |- QAgentNN
# | - Phi
phi_length = 5
dim_state = (1, phi_length, 3+2)
range_state_slice = [(0, 10), (0, 10), (0, 10), (0, 1), (0, 1)]
range_state = [[range_state_slice]*phi_length]
# | - Other params
momentum, learning_rate = 0.9, 0.01 # SGD
num_buffer, memory_size = 2, 200
reward_scaling, reward_scaling_update = 1, 'adaptive'
batch_size, update_period, freeze_period, rs_period = 100, 4, 16, 32
# |- Env model
Rs, Rw, Rf, Co, Cw = 1.0, -1.0, -10.0, -5.0, 0.0
beta = 0.5
reward_params = (Rs, Rw, Rf, Co, Cw, beta)
# |- Env
# |- Time
start_time = pd.to_datetime('2014-11-05 09:20:00')
total_time = pd.Timedelta(days=7)
time_step = pd.Timedelta(seconds=2)
backoff_epochs = num_buffer*memory_size+phi_length
head_datetime = start_time - time_step*backoff_epochs
tail_datetime = head_datetime + total_time
TOTAL_EPOCHS = int(total_time/time_step)
# |- Reward
rewarding = {'serve': Rs, 'wait': Rw, 'fail': Rf}
# load from processed data
session_df =pd.read_csv(
filepath_or_buffer='../data/trace_dh3.dat',
parse_dates=['startTime_datetime', 'endTime_datetime']
)
te = TrafficEmulator(
session_df=session_df, time_step=time_step,
head_datetime=head_datetime, tail_datetime=tail_datetime,
rewarding=rewarding,
verbose=2)
ts = TrafficServer(cost=(Co, Cw), verbose=2)
agent = Phi_QAgentNN(
phi_length=phi_length,
dim_state=dim_state, range_state=range_state,
f_build_net = None,
batch_size=batch_size, learning_rate=learning_rate, momentum=momentum,
reward_scaling=reward_scaling, reward_scaling_update=reward_scaling_update, rs_period=rs_period,
update_period=update_period, freeze_period=freeze_period,
memory_size=memory_size, num_buffer=num_buffer,
# Below is QAgent params
actions=actions, alpha=alpha, gamma=gamma,
explore_strategy=explore_strategy, epsilon=epsilon,
verbose=2)
c = QController(agent=agent)
emu = Emulation(te=te, ts=ts, c=c, beta=beta)
# Heavy lifting
t = time.time()
sys.stdout = sys_stdout
log_path = './log/'
if os.path.isfile(log_path+log_file_name):
print "Log file {} already exist. Experiment cancelled.".format(log_file_name)
else:
log_file = open(log_path+log_file_name,"w")
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
while emu.epoch is not None and emu.epoch<TOTAL_EPOCHS:
# log time
print "Epoch {},".format(emu.epoch),
left = emu.te.head_datetime + emu.te.epoch*emu.te.time_step
right = left + emu.te.time_step
print "{} - {}".format(left.strftime("%Y-%m-%d %H:%M:%S"), right.strftime("%Y-%m-%d %H:%M:%S"))
emu.step()
print
if emu.epoch%(0.05*TOTAL_EPOCHS)==0:
sys.stdout = sys_stdout
print datetime.now().strftime('[%Y-%m-%d %H:%M:%S]'),
print '{}%'.format(int(100.0*emu.epoch/TOTAL_EPOCHS)),
print log_file_name
time.sleep(1)
sys.stdout = log_file
sys.stdout = sys_stdout
log_file.close()
print
print log_file_name,
print '{:.3f} sec,'.format(time.time()-t),
print '{:.3f} min'.format((time.time()-t)/60)
| bsd-3-clause |
LaboratoireMecaniqueLille/Ximea | old/ximea_display_REC.py | 1 | 8856 | import numpy as np
import cv2
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import rcParams
import SimpleITK as sitk
from multiprocessing import Process, Pipe, Value
from matplotlib.widgets import Slider, Button
rcParams['font.family'] = 'serif'
#ps aux | grep python # KILL python process ...
#kill -9 insert_here_the_python_thread_number # ... try it if ximea won't open again.
plt.close('all')
############################## Parameters
nbr_images=400 # enter here the number of images you need to save.
save_directory="/home/corentin/Bureau/ximea/" # path to the save directory. BE AWARE that this script will erase previous images without regrets or remorse.
exposure= 10000 # exposure time, in microseconds
gain=2
height=1024 # reducing this one allows one to increase the FPS
width=1024 # doesn't work for this one
data_format=6 #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
external_trigger= False #set to True if you trigger with an external source (arduino...). BE AWARE there is a 10s waiting time for the ximea, meaning if you wait more than 10 sec to trigger, ximea will return an error and stop working.
set_FPS=False # set to True if you want to manually set the frame rate. It has 0.1 FPS precision @88FPS. If you need more precision, please use an external trigger with arduino.
FPS=50 # set here the frame rate you need. This parameter will only work if set_FPS=True.
numdevice = 0 # Set the number of the camera (if several cameras are plugged in)
##############################
rec_send , rec_recv = Pipe()
anim_send, anim_recv = Pipe()
rec_signal=Value('i',0)
plot_signal=Value('i',0)
#cap = cv2.VideoCapture(cv2.CAP_XIAPI) # open the ximea device
#cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
#if external_trigger==True: # this condition activate the trigger mode
#cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
#cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
#cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
#cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
#if data_format ==1 or data_format==6: #increase the FPS in 10 bits
#cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
#cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
#cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
#cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
##cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
#cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
##cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
##print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
##print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
#cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
#cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
#ret, frame = cap.read() # read a frame
### initialising the histogram
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==0 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==5:
#x=np.arange(0,256,4)
#if cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==1 or cap.get(cv2.CAP_PROP_XI_DATA_FORMAT)==6:
#x=np.arange(0,1024,4)
#hist=np.ones(np.shape(x))
### initialising graph and axes
rat = 0.7
Width=7
Height=7.
fig=plt.figure(figsize=(Height, Width))
frame=np.zeros((height,width))
axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
im = axim.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
#fig=plt.figure(figsize=(Height, Width))
#ax=fig.add_subplot(111)
##axim = fig.add_axes([0.15, 0.135, rat, rat*(Height/Width)]) # Image frame
##cax = fig.add_axes([0.17+rat, 0.135, 0.02, rat*(Height/Width)]) # colorbar frame
##axhist=fig.add_axes([0.15,(0.17+rat),rat,0.1]) # histogram frame
##axhist.set_xlim([0,max(x)]) #set histogram limit in x...
##axhist.set_ylim([0,1]) # ... and y
#frame=np.zeros((height,width))
#im = ax.imshow(frame,cmap=plt.cm.gray,interpolation='nearest') # display the first image
##li,= axhist.plot(x,hist) #plot first histogram
##cb = fig.colorbar(im, cax=cax) #plot colorbar
##cax.axis('off')
#fig.canvas.draw()
#plt.show(block=False)
### define cursors here
#axcolor = 'lightgoldenrodyellow'
#axExp = plt.axes([0.15, 0.02,rat, 0.03], axisbg=axcolor) # define position and size
#sExp = Slider(axExp, 'Exposure', 200, 50000, valinit=exposure) #Exposition max = 1000000 # define slider with previous position and size
#axGain= plt.axes([0.15, 0.07,rat, 0.03], axisbg=axcolor)
#sGain = Slider(axGain, 'Gain', -1, 6, valinit=gain)
#def update(val): # this function updates the exposure and gain values
#cap.set(cv2.CAP_PROP_EXPOSURE,sExp.val)
#cap.set(cv2.CAP_PROP_GAIN,sGain.val)
#fig.canvas.draw_idle()
#sExp.on_changed(update) # call for update everytime the cursors change
#sGain.on_changed(update)
### define buttons here
#RECax = plt.axes([0.01, (0.15+rat)/2, 0.05, 0.05]) # define size and position
#button = Button(RECax, 'REC', color='red', hovercolor='0.975') # define button
def REC(): # when called, read "nbr_images" and save them as .tiff in save_directory
while True:
while rec_signal.value!=1:
indent=True
t0=time.time()
last_t=0
i=0
while(i<nbr_images):
if set_FPS==True and last_t!=0: #This loop is used to set the FPS
while (time.time()-last_t) < 1./FPS:
indent=True
last_t=time.time()
frame = rec_recv.recv()
image=sitk.GetImageFromArray(frame)
sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %i) ### works fast in 8 or 16 bit, always use sitk.
i+=1
rec_signal.value=0
t=time.time()-t0
print "FPS = %s"%(nbr_images/t)
#def REC_one(event): # when called, read 1 image and save it as .tiff in save_directory with a timestamp, so the next REC will not erase the previous one
#ret, frame = cap.read()
#image=sitk.GetImageFromArray(frame)
#sitk.WriteImage(image,save_directory+"img_%.5d.tiff" %(time.time())) ### works fast in 8 or 16 bit, always use sitk.
def REC2(event):
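    # Button callback: raise the flag that tells the REC process to start saving frames.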
rec_signal.value=1
#button.on_clicked(REC2) # on click, call the REC function
### Main
def function(i):
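    # Animation callback: ask the acquisition process for a fresh frame over the pipe,
    # then refresh the displayed image.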
print "function aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
plot_signal.value=1
frame=anim_recv.recv() # read a frame
plot_signal.value=0
print "received!"
print frame[0]
im.set_data(frame)
return axim
def get_frame():
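    # Acquisition process: configure the Ximea camera, then loop forever reading frames
    # and forwarding them through the pipes whenever the display or REC flags are raised.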
cap = cv2.VideoCapture(cv2.CAP_XIAPI + numdevice) # open the ximea device Ximea devices start at 1100. 1100 => device 0, 1101 => device 1
if external_trigger==True: # this condition activate the trigger mode
cap.set(cv2.CAP_PROP_XI_TRG_SOURCE,1)
cap.set(cv2.CAP_PROP_XI_GPI_SELECTOR,1)
cap.set(cv2.CAP_PROP_XI_GPI_MODE,1)
cap.set(cv2.CAP_PROP_XI_DATA_FORMAT,data_format) #0=8 bits, 1=16(10)bits, 5=8bits RAW, 6=16(10)bits RAW
if data_format ==1 or data_format==6: #increase the FPS in 10 bits
cap.set(cv2.CAP_PROP_XI_OUTPUT_DATA_BIT_DEPTH,10)
cap.set(cv2.CAP_PROP_XI_DATA_PACKING,1)
cap.set(cv2.CAP_PROP_XI_AEAG,0)#auto gain auto exposure
cap.set(cv2.CAP_PROP_FRAME_WIDTH,width); # doesn't work for this one
#cap.set(cv2.CAP_PROP_XI_OFFSET_X,640);
cap.set(cv2.CAP_PROP_FRAME_HEIGHT,height); # reducing this one allows one to increase the FPS
#cap.set(cv2.CAP_PROP_XI_DOWNSAMPLING,0) # activate this one if you need to downsample your images, i.e if you need a very high FPS and other options are not enough
#print cap.get(cv2.CAP_PROP_FRAME_WIDTH)
#print cap.get(cv2.CAP_PROP_FRAME_HEIGHT);
cap.set(cv2.CAP_PROP_EXPOSURE,exposure) # setting up exposure
cap.set(cv2.CAP_PROP_GAIN,gain) #setting up gain
while True:
ret, frame=cap.read()
print "this is Patrick"
print frame[0]
print plot_signal.value
if plot_signal.value==1:
anim_send.send(frame)
print "sended"
print rec_signal.value
if rec_signal.value==1:
rec_send.send(frame)
Get_frame=Process(target=get_frame,args=())
time.sleep(1)
#Rec=Process(target=REC,args=())
#Ani=Process(target=ani,args=())
Get_frame.start()
time.sleep(1)
#Ani.start()
#Rec.start()
#ani = animation.FuncAnimation(fig, anim, interval=20, frames=20, blit=False) # This function call the anim function to update averything in the figure.
#plt.show()
Get_frame.join()
time.sleep(1)
#Ani.join()
animation.FuncAnimation(fig, function, interval=20, frames=20, blit=False) # This function call the anim function to update averything in the figure.
plt.show()
#Rec.join()
| gpl-2.0 |
rileymcdowell/genomic-neuralnet | genomic_neuralnet/methods/generic_keras_net.py | 1 | 6998 | from __future__ import print_function
import os
import time
import json
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Lambda
from keras.optimizers import Nadam as Trainer
#from keras.optimizers import Adam as Trainer
from keras.regularizers import WeightRegularizer
from keras.callbacks import EarlyStopping, Callback, LearningRateScheduler
from sklearn.preprocessing import MinMaxScaler
from genomic_neuralnet.util import get_is_time_stats, get_should_plot
TIMING_EPOCHS = 12000
class LossHistory(Callback):
def on_train_begin(self, logs={}):
self.losses = []
def on_epoch_end(self, epoch, logs={}):
self.losses.append(logs.get('loss'))
class NeuralNetContainer(object):
def __init__(self):
self.model = None
self.learning_rate = None
self.weight_decay = 0.0
self.dropout_prob = 0.0
self.epochs = 25
self.hidden_layers = (10,)
self.verbose = False
self.plot = False
def clone(self):
if not self.model is None:
            raise NotImplementedError('Cannot clone container after building model')
clone = NeuralNetContainer()
clone.learning_rate = self.learning_rate
clone.weight_decay = self.weight_decay
clone.dropout_prob = self.dropout_prob
clone.epochs = self.epochs
clone.hidden_layers = self.hidden_layers
clone.verbose = self.verbose
clone.plot = self.plot
return clone
def _build_nn(net_container, n_features):
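    """
    Build the Keras model described by net_container (hidden layers, dropout,
    weight decay, learning rate) for n_features inputs and store it on
    net_container.model.
    """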
model = Sequential()
# Change scale from (-1, 1) to (0, 1)
model.add(Lambda(lambda x: (x + 1) / 2, input_shape=(n_features,), output_shape=(n_features,)))
if net_container.weight_decay > 0.0:
weight_regularizer = WeightRegularizer(net_container.weight_decay)
else:
weight_regularizer = None
last_dim = n_features
for lidx, n_nodes in enumerate(net_container.hidden_layers):
# Layer, activation, and dropout, in that order.
model.add(Dense(output_dim=n_nodes, input_dim=last_dim, W_regularizer=weight_regularizer))
model.add(Activation('sigmoid'))
if net_container.dropout_prob > 0.0:
model.add(Dropout(net_container.dropout_prob))
last_dim = n_nodes
model.add(Dense(output_dim=1, input_dim=last_dim, bias=False))
model.add(Activation('linear'))
if not net_container.learning_rate is None:
optimizer = Trainer(lr=net_container.learning_rate)
else:
#optimizer = Trainer(lr=0.0001)
optimizer = Trainer()
model.compile( optimizer=optimizer
, loss='mean_squared_error'
)
net_container.model = model
def _train_net(container, X, y, override_epochs=None, is_check_train=False):
"""
Given a container, X (inputs), and y (outputs) train the network in the container.
* If override_epochs is an integer, just run that many epochs.
* The is_check_train parameter signifies that this training is a quick check to make
sure that the network is properly initialized and that the output error
is decreasing. The best "check trained" network will be passed in again
for an additional full set of training epochs.
"""
model = container.model
epochs = override_epochs if (not override_epochs is None) else container.epochs
verbose = int(container.verbose)
def rate_func(epoch):
if epochs - epoch == 2000:
# Settle down during last 2000 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
if epochs - epoch == 500:
# Go a bit further in last 500 epochs.
model.optimizer.lr.set_value(model.optimizer.lr.get_value()/4.0)
return float(model.optimizer.lr.get_value())
lr_scheduler = LearningRateScheduler(rate_func)
loss_history = LossHistory()
callbacks = [loss_history, lr_scheduler]
model.fit( X,
y,
nb_epoch=epochs,
               batch_size=X.shape[0] // 4,  # floor division keeps batch_size an integer
verbose=verbose,
callbacks=callbacks
)
if (isinstance(override_epochs, int)) and (not is_check_train) and container.plot:
        # Plot the loss history only when an explicit epoch count was requested
        # for a full run (never for the quick check-training passes).
import matplotlib.pyplot as plt
plt.plot(range(len(loss_history.losses)), loss_history.losses)
plt.show()
return loss_history.losses[-1]
def _predict(container, X):
model = container.model
return model.predict(X)
_NET_TRIES = 2
def _get_initial_net(container, n_features, X, y):
"""
Create a few networks. Start the training process for a few epochs, then take
the best one to continue training. This eliminates networks that are poorly
initialized and will not converge.
"""
candidates = []
for _ in range(_NET_TRIES):
cont = container.clone()
_build_nn(cont, n_features)
candidates.append(cont)
losses = []
for candidate in candidates:
# Train each candidate for 100 epochs.
loss = _train_net(candidate, X, y, override_epochs=100, is_check_train=True)
losses.append(loss)
best_idx = np.argmin(losses)
return candidates[best_idx]
def get_net_prediction( train_data, train_truth, test_data, test_truth
, hidden=(5,), weight_decay=0.0, dropout_prob=0.0
, learning_rate=None, epochs=25, verbose=False
, iter_id=None
):
container = NeuralNetContainer()
container.learning_rate = learning_rate
container.dropout_prob = dropout_prob
container.weight_decay = weight_decay
container.epochs = epochs
container.hidden_layers = hidden
container.verbose = verbose
container.plot = get_should_plot()
mms = MinMaxScaler(feature_range= (-1, 1)) # Scale output from -1 to 1.
train_y = mms.fit_transform(train_truth[:,np.newaxis])
n_features = train_data.shape[1]
collect_time_stats = get_is_time_stats()
if collect_time_stats:
start = time.time()
# Find and return an effectively initialized network to start.
container = _get_initial_net(container, n_features, train_data, train_y)
# Train the network.
if collect_time_stats:
# Train a specific time, never terminating early.
_train_net(container, train_data, train_y, override_epochs=TIMING_EPOCHS, is_check_train=False)
else:
# Normal training, enable all heuristics.
_train_net(container, train_data, train_y)
if collect_time_stats:
end = time.time()
print('Fitting took {} seconds'.format(end - start))
print(json.dumps({'seconds': end - start, 'hidden': container.hidden_layers}))
# Unsupervised (test) dataset.
predicted = _predict(container, test_data)
predicted = mms.inverse_transform(predicted)
return predicted.ravel()
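

# A minimal usage sketch of get_net_prediction. The synthetic marker data,
# train/test split, hidden-layer size, and epoch count below are arbitrary
# assumptions chosen only to illustrate the call signature.
if __name__ == '__main__':
    rng = np.random.RandomState(0)
    markers = rng.uniform(-1.0, 1.0, size=(120, 30))   # 120 lines x 30 markers in (-1, 1)
    phenos = 2.0 * markers[:, 0] + rng.normal(scale=0.1, size=120)
    preds = get_net_prediction(markers[:100], phenos[:100],
                               markers[100:], phenos[100:],
                               hidden=(5,), epochs=200, verbose=False)
    print('predicted {} phenotypes'.format(len(preds)))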
| mit |
tntnatbry/tensorflow | tensorflow/examples/learn/iris_custom_model.py | 50 | 2613 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of Estimator for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import datasets
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def my_model(features, target):
"""DNN with three hidden layers, and dropout of 0.1 probability."""
  # Convert the target to a one-hot tensor of shape (length of features, 3),
  # with an on-value of 1 for each one-hot vector of length 3.
target = tf.one_hot(target, 3, 1, 0)
  # Create three fully connected layers of size 10, 20, and 10 respectively,
  # each with a dropout probability of 0.1.
normalizer_fn = layers.dropout
normalizer_params = {'keep_prob': 0.9}
features = layers.stack(
features,
layers.fully_connected, [10, 20, 10],
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(features, 3, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = tf.contrib.layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='Adagrad',
learning_rate=0.1)
return ({
'class': tf.argmax(logits, 1),
'prob': tf.nn.softmax(logits)
}, loss, train_op)
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
classifier = learn.Estimator(model_fn=my_model)
classifier.fit(x_train, y_train, steps=1000)
y_predicted = [
p['class'] for p in classifier.predict(
x_test, as_iterable=True)
]
score = metrics.accuracy_score(y_test, y_predicted)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
matplotlib/viscm | tests.py | 1 | 4429 | from viscm.gui import *
from viscm.bezierbuilder import *
import numpy as np
import matplotlib as mpl
from matplotlib.backends.qt_compat import QtGui, QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
cms = {"viscm/examples/sample_linear.jscm",
"viscm/examples/sample_diverging.jscm",
"viscm/examples/sample_diverging_continuous.jscm"}
def test_editor_loads_native():
for k in cms:
with open(k) as f:
data = json.loads(f.read())
cm = Colormap(None, "CatmulClark", "CAM02-UCS")
cm.load(k)
viscm = viscm_editor(uniform_space=cm.uniform_space, cmtype=cm.cmtype, method=cm.method, **cm.params)
assert viscm.name == data["name"]
extensions = data["extensions"]["https://matplotlib.org/viscm"]
xp, yp, fixed = viscm.control_point_model.get_control_points()
assert extensions["fixed"] == fixed
assert len(extensions["xp"]) == len(xp)
assert len(extensions["yp"]) == len(yp)
assert len(xp) == len(yp)
for i in range(len(xp)):
assert extensions["xp"][i] == xp[i]
assert extensions["yp"][i] == yp[i]
assert extensions["min_Jp"] == viscm.min_Jp
assert extensions["max_Jp"] == viscm.max_Jp
assert extensions["filter_k"] == viscm.filter_k
assert extensions["cmtype"] == viscm.cmtype
colors = data["colors"]
colors = [[int(c[i:i + 2], 16) / 256 for i in range(0, 6, 2)] for c in [colors[i:i + 6] for i in range(0, len(colors), 6)]]
editor_colors = viscm.cmap_model.get_sRGB(num=256)[0].tolist()
for i in range(len(colors)):
for z in range(3):
assert colors[i][z] == np.rint(editor_colors[i][z] / 256)
# def test_editor_add_point():
# # Testing linear
# fig = plt.figure()
# figure_canvas = FigureCanvas(fig)
# linear = viscm_editor(min_Jp=40, max_Jp=60, xp=[-10, 10], yp=[0,0], figure=fig, cmtype="linear")
# Jp, ap, bp = linear.cmap_model.get_Jpapbp(3)
# eJp, eap, ebp = [40, 50, 60], [-10, 0, 10], [0, 0, 0]
# for i in range(3):
# assert approxeq(Jp[i], eJp[i])
# assert approxeq(ap[i], eap[i])
# assert approxeq(bp[i], ebp[i])
# rgb = linear.cmap_model.get_sRGB(3)[0]
# ergb = [[ 0.27446483, 0.37479529, 0.34722738],
# [ 0.44884374, 0.44012037, 0.43848162],
# [ 0.63153956, 0.49733664, 0.53352363]]
# for i in range(3):
# for z in range(3):
# assert approxeq(rgb[i][z], ergb[i][z])
# # Testing adding a point to linear
# linear.bezier_builder.mode = "add"
# qtEvent = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress, QtCore.QPoint(), QtCore.Qt.LeftButton, QtCore.Qt.LeftButton, QtCore.Qt.ShiftModifier)
# event = mpl.backend_bases.MouseEvent("button_press_event", figure_canvas, 0, 10, guiEvent=qtEvent)
# event.xdata = 0
# event.ydata = 10
# event.inaxes = linear.bezier_builder.ax
# linear.bezier_builder.on_button_press(event)
# Jp, ap, bp = linear.cmap_model.get_Jpapbp(3)
# eJp, eap, ebp = [40, 50, 60], [-10, 0, 10], [0, 5, 0]
# for i in range(3):
# assert approxeq(Jp[i], eJp[i])
# assert approxeq(ap[i], eap[i])
# assert approxeq(bp[i], ebp[i])
# rgb = linear.cmap_model.get_sRGB(3)[0]
# ergb = [[ 0.27446483, 0.37479529, 0.34722738],
# [ 0.46101392, 0.44012069, 0.38783966],
# [ 0.63153956, 0.49733664, 0.53352363]]
# for i in range(3):
# for z in range(3):
# assert approxeq(rgb[i][z], ergb[i][z])
# # Removing a point from linear
# linear.bezier_builder.mode = "remove"
# qtEvent = QtGui.QMouseEvent(QtCore.QEvent.MouseButtonPress, QtCore.QPoint(), QtCore.Qt.LeftButton, QtCore.Qt.LeftButton, QtCore.Qt.ControlModifier)
# event = mpl.backend_bases.MouseEvent("button_press_event", figure_canvas, 0, 10, guiEvent=qtEvent)
# event.xdata = 0
# event.ydata = 10
# event.inaxes = linear.bezier_builder.ax
# linear.bezier_builder.on_button_press(event)
# # Jp, ap, bp = linear.cmap_model.get_Jpapbp(3)
# # print(Jp, ap, bp)
# # print(rgb)
# # use mpl transformations
# print(linear.control_point_model.get_control_points())
# # print(linear.cmap_model.get_Jpapbp(3))
def approxeq(x, y, err=0.0001):
return abs(y - x) < err
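

# A small added sanity check for the approxeq helper above; the tolerances used
# here are arbitrary illustrative values.
def test_approxeq():
    assert approxeq(1.0, 1.00005)
    assert not approxeq(1.0, 1.1)
    assert approxeq(2.0, 2.5, err=1.0)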
| mit |
sanjayankur31/nest-simulator | pynest/examples/balancedneuron.py | 8 | 7344 | # -*- coding: utf-8 -*-
#
# balancedneuron.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Balanced neuron example
-----------------------
This script simulates a neuron driven by an excitatory and an
inhibitory population of neurons firing Poisson spike trains. The aim
is to find a firing rate for the inhibitory population that will make
the neuron fire at the same rate as the excitatory population.
Optimization is performed using the ``bisection`` method from Scipy,
simulating the network repeatedly.
This example is also shown in the article [1]_
References
~~~~~~~~~~
.. [1] Eppler JM, Helias M, Muller E, Diesmann M, Gewaltig MO (2009). PyNEST: A convenient interface to the NEST
simulator, Front. Neuroinform.
http://dx.doi.org/10.3389/neuro.11.012.2008
"""
###############################################################################
# First, we import all necessary modules for simulation, analysis and
# plotting. Scipy should be imported before nest.
from scipy.optimize import bisect
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
###############################################################################
# Additionally, we set the verbosity using ``set_verbosity`` to
# suppress info messages.
nest.set_verbosity("M_WARNING")
nest.ResetKernel()
###############################################################################
# Second, the simulation parameters are assigned to variables.
t_sim = 25000.0 # how long we simulate
n_ex = 16000 # size of the excitatory population
n_in = 4000 # size of the inhibitory population
r_ex = 5.0 # mean rate of the excitatory population
r_in = 20.5 # initial rate of the inhibitory population
epsc = 45.0 # peak amplitude of excitatory synaptic currents
ipsc = -45.0 # peak amplitude of inhibitory synaptic currents
d = 1.0 # synaptic delay
lower = 15.0 # lower bound of the search interval
upper = 25.0 # upper bound of the search interval
prec = 0.01 # how close need the excitatory rates be
###############################################################################
# Third, the nodes are created using ``Create``. We store the returned
# handles in variables for later reference.
neuron = nest.Create("iaf_psc_alpha")
noise = nest.Create("poisson_generator", 2)
voltmeter = nest.Create("voltmeter")
spikerecorder = nest.Create("spike_recorder")
###################################################################################
# Fourth, the ``poisson_generator`` (`noise`) is configured.
# Note that we need not set parameters for the neuron, the spike recorder, and
# the voltmeter, since they have satisfactory defaults.
noise.rate = [n_ex * r_ex, n_in * r_in]
###############################################################################
# Fifth, the ``iaf_psc_alpha`` is connected to the ``spike_recorder`` and the
# ``voltmeter``, as are the two Poisson generators to the neuron. The command
# ``Connect`` has different variants. Plain `Connect` just takes the handles of
# pre- and postsynaptic nodes and uses the default values for weight and
# delay. It can also be called with a list of weights, as in the connection
# of the noise below.
# Note that the connection direction for the ``voltmeter`` is reversed compared
# to the ``spike_recorder``, because it observes the neuron instead of
# receiving events from it. Thus, ``Connect`` reflects the direction of signal
# flow in the simulation kernel rather than the physical process of inserting
# an electrode into the neuron. The latter semantics is presently not
# available in NEST.
nest.Connect(neuron, spikerecorder)
nest.Connect(voltmeter, neuron)
nest.Connect(noise, neuron, syn_spec={'weight': [[epsc, ipsc]], 'delay': 1.0})
###############################################################################
# To determine the optimal rate of the neurons in the inhibitory population,
# the network is simulated several times for different values of the
# inhibitory rate while measuring the rate of the target neuron. This is done
# by calling ``Simulate`` until the rate of the target neuron matches the rate
# of the neurons in the excitatory population with a certain accuracy. The
# algorithm is implemented in two steps:
#
# First, the function ``output_rate`` is defined to measure the firing rate
# of the target neuron for a given rate of the inhibitory neurons.
def output_rate(guess):
print("Inhibitory rate estimate: %5.2f Hz" % guess)
rate = float(abs(n_in * guess))
noise[1].rate = rate
spikerecorder.n_events = 0
nest.Simulate(t_sim)
out = spikerecorder.n_events * 1000.0 / t_sim
print(" -> Neuron rate: %6.2f Hz (goal: %4.2f Hz)" % (out, r_ex))
return out
###############################################################################
# The function takes the firing rate of the inhibitory neurons as an
# argument. It scales the rate with the size of the inhibitory population and
# configures the inhibitory Poisson generator (`noise[1]`) accordingly.
# Then, the spike counter of the ``spike_recorder`` is reset to zero. The
# network is simulated using ``Simulate``, which takes the desired simulation
# time in milliseconds and advances the network state by this amount of time.
# During simulation, the ``spike_recorder`` counts the spikes of the target
# neuron and the total number is read out at the end of the simulation
# period. The return value of ``output_rate()`` is the firing rate of the
# target neuron in Hz.
#
# Second, the scipy function ``bisect`` is used to determine the optimal
# firing rate of the neurons of the inhibitory population.
in_rate = bisect(lambda x: output_rate(x) - r_ex, lower, upper, xtol=prec)
print("Optimal rate for the inhibitory population: %.2f Hz" % in_rate)
###############################################################################
# The function ``bisect`` takes four arguments: first a function whose
# zero crossing is to be determined. Here, the firing rate of the target
# neuron should equal the firing rate of the neurons of the excitatory
# population. Thus we define an anonymous function (using `lambda`) that
# returns the difference between the actual rate of the target neuron and the
# rate of the excitatory Poisson generator, given a rate for the inhibitory
# neurons. The next two arguments are the lower and upper bound of the
# interval in which to search for the zero crossing. The fourth argument of
# ``bisect`` is the desired relative precision of the zero crossing.
#
# Finally, we plot the target neuron's membrane potential as a function of
# time.
nest.voltage_trace.from_device(voltmeter)
plt.show()
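
###############################################################################
# As an illustrative extra step, the recorded trace can also be read directly
# from the voltmeter's events dictionary, e.g. for custom analysis of the
# membrane potential plotted above.
events = voltmeter.get("events")
print("Recorded %d membrane-potential samples" % len(events["times"]))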
| gpl-2.0 |
djgagne/scikit-learn | sklearn/covariance/__init__.py | 389 | 1157 | """
The :mod:`sklearn.covariance` module includes methods and algorithms to
robustly estimate the covariance of features given a set of points. The
precision matrix defined as the inverse of the covariance is also estimated.
Covariance estimation is closely related to the theory of Gaussian Graphical
Models.
"""
from .empirical_covariance_ import empirical_covariance, EmpiricalCovariance, \
log_likelihood
from .shrunk_covariance_ import shrunk_covariance, ShrunkCovariance, \
ledoit_wolf, ledoit_wolf_shrinkage, \
LedoitWolf, oas, OAS
from .robust_covariance import fast_mcd, MinCovDet
from .graph_lasso_ import graph_lasso, GraphLasso, GraphLassoCV
from .outlier_detection import EllipticEnvelope
__all__ = ['EllipticEnvelope',
'EmpiricalCovariance',
'GraphLasso',
'GraphLassoCV',
'LedoitWolf',
'MinCovDet',
'OAS',
'ShrunkCovariance',
'empirical_covariance',
'fast_mcd',
'graph_lasso',
'ledoit_wolf',
'ledoit_wolf_shrinkage',
'log_likelihood',
'oas',
'shrunk_covariance']
| bsd-3-clause |