repo_name | path | copies | size | content | license
---|---|---|---|---|---|
sternshus/Arelle | setup.py | 1 | 17326 | """
Created on Jan 30, 2011
@author: Mark V Systems Limited
(c) Copyright 2011 Mark V Systems Limited, All rights reserved.
"""
import sys
import os
import datetime
from distutils.command.build_py import build_py as _build_py
def get_version():
"""
Utility function to return the current version of the library, as defined
by the version string in arelle's _pkg_meta.py file. The format follows
the standard Major.Minor.Fix notation.
:return: The version string in the standard Major.Minor.Fix notation.
:rtype: str
"""
import imp
source_dir = 'arelle'
with open('{}/_pkg_meta.py'.format(source_dir), 'rb') as fp:
mod = imp.load_source('_pkg_meta', source_dir, fp)
return mod.version
setup_requires = ['lxml']
# install_requires specifies a list of package dependencies that are
# installed when 'python setup.py install' is run. On Linux/Mac systems
# this also allows installation directly from the github repository
# (using 'pip install -e git+git://github.com/rheimbuchArelle.git#egg=Arelle')
# and the install_requires packages are auto-installed as well.
install_requires = ['lxml']
options = {}
scripts = []
cxFreezeExecutables = []
cmdclass = {}
# Files that should not be passed through 3to2 conversion
# in python 2.7 builds
build_py27_unmodified = [
'arelle/webserver/bottle.py',
'arelle/PythonUtil.py'
]
# Files that should be excluded from python 2.7 builds
build_py27_excluded = [
'arelle/CntlrQuickBooks.py',
'arelle/CntlrWinMain.py',
'arelle/CntlrWinTooltip.py',
'arelle/Dialog*.py',
'arelle/UiUtil.py',
'arelle/ViewWin*.py',
'arelle/WatchRss.py'
]
def match_patterns(path, pattern_list=[]):
from fnmatch import fnmatch
for pattern in pattern_list:
if fnmatch(path, pattern):
return True
return False
# When building under python 2.7, run refactorings from lib3to2
class build_py27(_build_py):
def __init__(self, *args, **kwargs):
_build_py.__init__(self, *args, **kwargs)
import logging
from lib2to3 import refactor
import lib3to2.main
rt_logger = logging.getLogger("RefactoringTool")
rt_logger.addHandler(logging.StreamHandler())
fixers = refactor.get_fixers_from_package('lib3to2.fixes')
fixers.remove('lib3to2.fixes.fix_print')
self.rtool = lib3to2.main.StdoutRefactoringTool(
fixers,
None,
[],
False,
False
)
def copy_file(self, source, target, preserve_mode=True):
if match_patterns(source, build_py27_unmodified):
_build_py.copy_file(self, source, target, preserve_mode)
elif match_patterns(source, build_py27_excluded):
print("excluding: %s" % source)
elif source.endswith('.py'):
try:
print("3to2 converting: %s => %s" % (source, target))
with open(source, 'rt') as input:
# ensure file contents have trailing newline
source_content = input.read() + "\n"
nval = self.rtool.refactor_string(source_content, source)
if nval is not None:
with open(target, 'wt') as output:
output.write('from __future__ import print_function\n')
output.write(str(nval))
else:
raise(Exception("Failed to parse: %s" % source))
except Exception as e:
print("3to2 error (%s => %s): %s" % (source,target,e))
if sys.version_info[0] < 3:
setup_requires.append('3to2')
# cmdclass allows you to override the distutils commands that are
# run through 'python setup.py somecmd'. Under python 2.7 replace
# the 'build_py' with a custom subclass (build_py27) that invokes
# 3to2 refactoring on each python file as its copied to the build
# directory.
cmdclass['build_py'] = build_py27
# (Under python3 no commands are replaced, so the default command classes are used.)
try:
# Under python2.7, run build before running build_sphinx
import sphinx.setup_command
class build_sphinx_py27(sphinx.setup_command.BuildDoc):
def run(self):
self.run_command('build_py')
# Ensure sphinx looks at the "built" arelle libs that
# have passed through the 3to2 refactorings
# in `build_py27`.
sys.path.insert(0, os.path.abspath("./build/lib"))
sphinx.setup_command.BuildDoc.run(self)
if sys.version_info[0] < 3:
setup_requires.append('3to2')
setup_requires.append('sphinx')
# do a similar override of the 'build_sphinx' command to ensure
# that the 3to2-enabled build command runs before calling back to
# the default build_sphinx superclass.
cmdclass['build_sphinx'] = build_sphinx_py27
# There is also a python 2.x conditional switch in 'apidocs/conf.py'
# that sets sphinx to look at the 3to2 converted build files instead
# of the original unconverted source.
except ImportError as e:
print("Documentation production by Sphinx is not available: %s" % e)
''' this section was for py2app which no longer works on Mavericks,
switch below to cx_Freeze
if sys.platform == 'darwin':
from setuptools import setup, find_packages
setup_requires.append('py2app')
# Cross-platform applications generally expect sys.argv to
# be used for opening files.
plist = dict(CFBundleIconFile='arelle.icns',
NSHumanReadableCopyright='(c) 2010-2013 Mark V Systems Limited')
# MacOS launches CntlrWinMain and uses "ARELLE_ARGS" to effect console (shell) mode
options = dict(py2app=dict(app=['arelle/CntlrWinMain.py'],
iconfile='arelle/images/arelle.icns',
plist=plist,
#
# rdflib & isodate egg files: rename .zip cpy lib & egg-info subdirectories to site-packages directory
#
includes=['lxml', 'lxml.etree',
'lxml._elementpath', 'pg8000',
'rdflib', 'rdflib.extras', 'rdflib.tools',
# more rdflib plugin modules may need to be added later
'rdflib.plugins', 'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers', 'rdflib.plugins.serializers.rdfxml', 'rdflib.plugins.serializers.turtle', 'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib']))
packages = find_packages('.')
dataFiles = [
#XXX: this breaks build on Lion/Py3.2 --mike
#'--iconfile',
('config',['arelle/config/' + f for f in os.listdir('arelle/config')]),
('doc',['arelle/doc/' + f for f in os.listdir('arelle/doc')]),
('examples',['arelle/examples/' + f for f in os.listdir('arelle/examples')]),
('images',['arelle/images/' + f for f in os.listdir('arelle/images')]),
('examples/plugin',['arelle/examples/plugin/' + f for f in os.listdir('arelle/examples/plugin')]),
('examples/plugin/locale/fr/LC_MESSAGES',['arelle/examples/plugin/locale/fr/LC_MESSAGES/' + f for f in os.listdir('arelle/examples/plugin/locale/fr/LC_MESSAGES')]),
('plugin',['arelle/plugin/' + f for f in os.listdir('arelle/plugin')]),
('scripts',['arelle/scripts/' + f for f in os.listdir('arelle/scripts-macOS')]),
]
for dir, subDirs, files in os.walk('arelle/locale'):
dir = dir.replace('\\','/')
dataFiles.append((dir[7:],
[dir + "/" + f for f in files]))
cx_FreezeExecutables = []
#End of py2app defunct section
'''
# works on ubuntu with hand-built cx_Freeze
if sys.platform in ('darwin', 'linux2', 'linux', 'sunos5'):
from setuptools import find_packages
try:
from cx_Freeze import setup, Executable
cx_FreezeExecutables = [
Executable(script="arelleGUI.pyw", targetName="arelle"),
Executable(script="arelleCmdLine.py")
]
except:
from setuptools import setup
cx_FreezeExecutables = []
packages = find_packages(
'.', # note: newer setuptools would otherwise pick up unwanted plugin and lib packages
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = []
includeFiles = [
('arelle/config','config'),
('arelle/doc','doc'),
('arelle/images','images'),
('arelle/locale','locale'),
('arelle/examples','examples'),
('arelle/examples/plugin','examples/plugin'),
(
'arelle/examples/plugin/locale/fr/LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle/plugin','plugin')
]
if sys.platform == 'darwin':
includeFiles.append(('arelle/scripts-macOS','scripts'))
# copy tcl and tk built as described: https://www.tcl.tk/doc/howto/compile.html#mac
includeFiles.append(('/Library/Frameworks/Tcl.framework/Versions/8.6/Resources/Scripts','tcl8.6'))
includeFiles.append(('/Library/Frameworks/Tk.framework/Versions/8.6/Resources/Scripts','tk8.6'))
else:
includeFiles.append(('arelle/scripts-unix','scripts'))
if os.path.exists("/etc/redhat-release"):
# extra libraries needed for red hat
includeFiles.append(('/usr/local/lib/libexslt.so', 'libexslt.so'))
includeFiles.append(('/usr/local/lib/libxml2.so', 'libxml2.so'))
# for some reason redhat needs libxml2.so.2 as well
includeFiles.append(('/usr/local/lib/libxml2.so.2', 'libxml2.so.2'))
includeFiles.append(('/usr/local/lib/libxslt.so', 'libxslt.so'))
includeFiles.append(('/usr/local/lib/libz.so', 'libz.so'))
if os.path.exists("version.txt"):
includeFiles.append(('version.txt', 'version.txt'))
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'sqlite3', 'numpy',
# note cx_Oracle isn't here because it is version and machine specific,
# and not likely to work on ubuntu
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next two files if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil',
'dateutil.relativedelta',
'six',
'tornado',
'pyparsing',
'matplotlib'
]
import matplotlib
dataFiles += matplotlib.get_py2exe_datafiles()
if sys.platform != 'sunos5':
try:
import pyodbc # see if this is importable
includeLibs.append('pyodbc') # has C compiling errors on Sparc
except ImportError:
pass
options = dict(
build_exe={
"include_files": includeFiles,
#
# rdflib & isodate egg files: rename the .zip, then copy the lib & egg-info
# subdirectories to the site-packages directory
#
"includes": includeLibs,
"packages": packages,
}
)
if sys.platform == 'darwin':
options["bdist_mac"] = {
"iconfile": 'arelle/images/arelle.icns',
"bundle_name": 'Arelle',
}
elif sys.platform == 'win32':
from setuptools import find_packages
from cx_Freeze import setup, Executable
# py2exe is not ported to Python 3 yet
# setup_requires.append('py2exe')
# FIXME: this should use the entry_points mechanism
packages = find_packages('.')
print("packages={}".format(packages))
dataFiles = None
win32includeFiles = [
('arelle\\config','config'),
('arelle\\doc','doc'),
('arelle\\images','images'),
('arelle\\locale','locale'),
('arelle\\examples','examples'),
('arelle\\examples\\plugin','examples/plugin'),
(
'arelle\\examples\\plugin\\locale\\fr\\LC_MESSAGES',
'examples/plugin/locale/fr/LC_MESSAGES'
),
('arelle\\plugin','plugin'),
('arelle\\scripts-windows','scripts')
]
if 'arelle.webserver' in packages:
win32includeFiles.append('QuickBooks.qwc')
if os.path.exists("version.txt"):
win32includeFiles.append('version.txt')
includeLibs = [
'lxml', 'lxml.etree', 'lxml._elementpath', 'lxml.html',
'pg8000', 'pymysql', 'cx_Oracle', 'pyodbc', 'sqlite3', 'numpy',
# more rdflib plugin modules may need to be added later
'rdflib',
'rdflib.extras',
'rdflib.tools',
'rdflib.plugins',
'rdflib.plugins.memory',
'rdflib.plugins.parsers',
'rdflib.plugins.serializers',
'rdflib.plugins.serializers.rdfxml',
'rdflib.plugins.serializers.turtle',
'rdflib.plugins.serializers.xmlwriter',
'rdflib.plugins.sparql',
'rdflib.plugins.stores',
'isodate', 'regex', 'gzip', 'zlib',
'openpyxl' # openpyxl's __init__.py must be hand edited, see https://bitbucket.org/openpyxl/openpyxl/pull-requests/80/__about__py/diff
]
# uncomment the next line if cx_Freezing with EdgarRenderer
# note that openpyxl must be 2.1.4 at this time
# removed tornado
if os.path.exists("arelle/plugin/EdgarRenderer"):
includeLibs += [
'cherrypy', 'cherrypy.wsgiserver.wsgiserver3',
'dateutil', 'dateutil.relativedelta',
"six", "pyparsing", "matplotlib"
]
options = dict(
build_exe={
"include_files": win32includeFiles,
"include_msvcr": True, # include MSVCR100
# "icon": 'arelle\\images\\arelle16x16and32x32.ico',
"packages": packages,
#
# rdflib & isodate egg files: rename the .zip, then copy the lib & egg-info
# subdirectories to the site-packages directory
#
"includes": includeLibs
}
)
# windows uses arelleGUI.exe to launch in GUI mode, arelleCmdLine.exe in command line mode
cx_FreezeExecutables = [
Executable(
script="arelleGUI.pyw",
base="Win32GUI",
icon='arelle\\images\\arelle16x16and32x32.ico',
),
Executable(
script="arelleCmdLine.py",
)
]
else:
#print("Your platform {0} isn't supported".format(sys.platform))
#sys.exit(1)
from setuptools import os, setup, find_packages
packages = find_packages(
'.', # note: newer setuptools would otherwise pick up unwanted plugin and lib packages
exclude=['*.plugin.*', '*.lib.*']
)
dataFiles = [(
'config',
['arelle/config/' + f for f in os.listdir('arelle/config')]
)]
cx_FreezeExecutables = []
timestamp = datetime.datetime.utcnow()
setup(
name='Arelle',
version=get_version(),
description='An open source XBRL platform',
long_description=open('README.md').read(),
author='arelle.org',
author_email='support@arelle.org',
url='http://www.arelle.org',
download_url='http://www.arelle.org/download',
cmdclass=cmdclass,
include_package_data=True, # note: this uses MANIFEST.in
packages=packages,
data_files=dataFiles,
platforms=['OS Independent'],
license='Apache-2',
keywords=['xbrl'],
classifiers=[
'Development Status :: 1 - Active',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache-2 License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Operating System :: OS Independent',
'Topic :: XBRL Validation and Versioning',
],
scripts=scripts,
entry_points={
'console_scripts': [
'arelle=arelle.CntlrCmdLine:main',
'arelle-gui=arelle.CntlrWinMain:main',
]
},
setup_requires=setup_requires,
install_requires=install_requires,
options=options,
executables=cx_FreezeExecutables,
)
| apache-2.0 |
Lawrence-Liu/scikit-learn | examples/cluster/plot_segmentation_toy.py | 258 | 3336 | """
===========================================
Spectral clustering for image segmentation
===========================================
In this example, an image with connected circles is generated and
spectral clustering is used to separate the circles.
In these settings, the :ref:`spectral_clustering` approach solves the problem
known as 'normalized graph cuts': the image is seen as a graph of
connected voxels, and the spectral clustering algorithm amounts to
choosing graph cuts defining regions while minimizing the ratio of the
gradient along the cut, and the volume of the region.
As the algorithm tries to balance the volume (i.e., balance the region
sizes), if we take circles with different sizes, the segmentation fails.
In addition, as there is no useful information in the intensity of the image,
or its gradient, we choose to perform the spectral clustering on a graph
that is only weakly informed by the gradient. This is close to performing
a Voronoi partition of the graph.
In addition, we use the mask of the objects to restrict the graph to the
outline of the objects. In this example, we are interested in
separating the objects one from the other, and not from the background.
"""
print(__doc__)
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.feature_extraction import image
from sklearn.cluster import spectral_clustering
###############################################################################
l = 100
x, y = np.indices((l, l))
center1 = (28, 24)
center2 = (40, 50)
center3 = (67, 58)
center4 = (24, 70)
radius1, radius2, radius3, radius4 = 16, 14, 15, 14
circle1 = (x - center1[0]) ** 2 + (y - center1[1]) ** 2 < radius1 ** 2
circle2 = (x - center2[0]) ** 2 + (y - center2[1]) ** 2 < radius2 ** 2
circle3 = (x - center3[0]) ** 2 + (y - center3[1]) ** 2 < radius3 ** 2
circle4 = (x - center4[0]) ** 2 + (y - center4[1]) ** 2 < radius4 ** 2
###############################################################################
# 4 circles
img = circle1 + circle2 + circle3 + circle4
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
# Convert the image into a graph with the value of the gradient on the
# edges.
graph = image.img_to_graph(img, mask=mask)
# Take a decreasing function of the gradient: we make it only weakly
# dependent on the gradient, so the segmentation is close to a Voronoi partition
graph.data = np.exp(-graph.data / graph.data.std())
# Force the solver to be arpack, since amg is numerically
# unstable on this example
labels = spectral_clustering(graph, n_clusters=4, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
###############################################################################
# 2 circles
img = circle1 + circle2
mask = img.astype(bool)
img = img.astype(float)
img += 1 + 0.2 * np.random.randn(*img.shape)
graph = image.img_to_graph(img, mask=mask)
graph.data = np.exp(-graph.data / graph.data.std())
labels = spectral_clustering(graph, n_clusters=2, eigen_solver='arpack')
label_im = -np.ones(mask.shape)
label_im[mask] = labels
plt.matshow(img)
plt.matshow(label_im)
plt.show()
| bsd-3-clause |
hesseltuinhof/mxnet | example/gluon/dcgan.py | 1 | 8010 | import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
import argparse
import mxnet as mx
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import autograd
import numpy as np
import logging
from datetime import datetime
import os
import time
def fill_buf(buf, i, img, shape):
n = buf.shape[0]//shape[1]
m = buf.shape[1]//shape[0]
sx = (i%m)*shape[0]
sy = (i//m)*shape[1]
buf[sy:sy+shape[1], sx:sx+shape[0], :] = img
return None
def visual(title, X, name):
assert len(X.shape) == 4
X = X.transpose((0, 2, 3, 1))
X = np.clip((X - np.min(X))*(255.0/(np.max(X) - np.min(X))), 0, 255).astype(np.uint8)
n = np.ceil(np.sqrt(X.shape[0]))
buff = np.zeros((int(n*X.shape[1]), int(n*X.shape[2]), int(X.shape[3])), dtype=np.uint8)
for i, img in enumerate(X):
fill_buf(buff, i, img, X.shape[1:3])
buff = buff[:,:,::-1]
plt.imshow(buff)
plt.title(title)
plt.savefig(name)
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', type=str, default='cifar10', help='dataset to use. options are cifar10 and imagenet.')
parser.add_argument('--batch-size', type=int, default=64, help='input batch size')
parser.add_argument('--nz', type=int, default=100, help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--nepoch', type=int, default=25, help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002, help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5, help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1, help='number of GPUs to use')
parser.add_argument('--netG', default='', help="path to netG (to continue training)")
parser.add_argument('--netD', default='', help="path to netD (to continue training)")
parser.add_argument('--outf', default='./results', help='folder to output images and model checkpoints')
parser.add_argument('--check-point', default=True, help="save results at each epoch or not")
opt = parser.parse_args()
print(opt)
logging.basicConfig(level=logging.DEBUG)
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
if opt.cuda:
ctx = mx.gpu(0)
else:
ctx = mx.cpu()
check_point = bool(opt.check_point)
outf = opt.outf
if not os.path.exists(outf):
os.makedirs(outf)
def transformer(data, label):
# resize to 64x64
data = mx.image.imresize(data, 64, 64)
# transpose from (64, 64, 3) to (3, 64, 64)
data = mx.nd.transpose(data, (2,0,1))
# normalize to [-1, 1]
data = data.astype(np.float32)/128 - 1
# if image is greyscale, repeat 3 times to get RGB image.
if data.shape[0] == 1:
data = mx.nd.tile(data, (3, 1, 1))
return data, label
train_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=True, transform=transformer),
batch_size=opt.batch_size, shuffle=True, last_batch='discard')
val_data = gluon.data.DataLoader(
gluon.data.vision.MNIST('./data', train=False, transform=transformer),
batch_size=opt.batch_size, shuffle=False)
# build the generator
netG = nn.Sequential()
with netG.name_scope():
# input is Z, going into a convolution
netG.add(nn.Conv2DTranspose(ngf * 8, 4, 1, 0, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 4 x 4
netG.add(nn.Conv2DTranspose(ngf * 4, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 8 x 8
netG.add(nn.Conv2DTranspose(ngf * 2, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 16 x 16
netG.add(nn.Conv2DTranspose(ngf, 4, 2, 1, use_bias=False))
netG.add(nn.BatchNorm())
netG.add(nn.Activation('relu'))
# state size. (ngf*8) x 32 x 32
netG.add(nn.Conv2DTranspose(nc, 4, 2, 1, use_bias=False))
netG.add(nn.Activation('tanh'))
# state size. (nc) x 64 x 64
# build the discriminator
netD = nn.Sequential()
with netD.name_scope():
# input is (nc) x 64 x 64
netD.add(nn.Conv2D(ndf, 4, 2, 1, use_bias=False))
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 32 x 32
netD.add(nn.Conv2D(ndf * 2, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 16 x 16
netD.add(nn.Conv2D(ndf * 4, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 8 x 8
netD.add(nn.Conv2D(ndf * 8, 4, 2, 1, use_bias=False))
netD.add(nn.BatchNorm())
netD.add(nn.LeakyReLU(0.2))
# state size. (ndf) x 4 x 4
netD.add(nn.Conv2D(2, 4, 1, 0, use_bias=False))
# loss
loss = gluon.loss.SoftmaxCrossEntropyLoss()
# initialize the generator and the discriminator
netG.initialize(mx.init.Normal(0.02), ctx=ctx)
netD.initialize(mx.init.Normal(0.02), ctx=ctx)
# trainer for the generator and the discriminator
trainerG = gluon.Trainer(netG.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
trainerD = gluon.Trainer(netD.collect_params(), 'adam', {'learning_rate': opt.lr, 'beta1': opt.beta1})
# ============printing==============
real_label = mx.nd.ones((opt.batch_size,), ctx=ctx)
fake_label = mx.nd.zeros((opt.batch_size,), ctx=ctx)
metric = mx.metric.Accuracy()
print('Training... ')
stamp = datetime.now().strftime('%Y_%m_%d-%H_%M')
iter = 0
for epoch in range(opt.nepoch):
tic = time.time()
btic = time.time()
for data, _ in train_data:
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real_t
data = data.as_in_context(ctx)
noise = mx.nd.random_normal(0, 1, shape=(opt.batch_size, nz, 1, 1), ctx=ctx)
with autograd.record():
output = netD(data)
output = output.reshape((opt.batch_size, 2))
errD_real = loss(output, real_label)
metric.update([real_label,], [output,])
fake = netG(noise)
output = netD(fake.detach())
output = output.reshape((opt.batch_size, 2))
errD_fake = loss(output, fake_label)
errD = errD_real + errD_fake
errD.backward()
metric.update([fake_label,], [output,])
trainerD.step(opt.batch_size)
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
with autograd.record():
output = netD(fake)
output = output.reshape((-1, 2))
errG = loss(output, real_label)
errG.backward()
trainerG.step(opt.batch_size)
name, acc = metric.get()
# logging.info('speed: {} samples/s'.format(opt.batch_size / (time.time() - btic)))
logging.info('discriminator loss = %f, generator loss = %f, binary training acc = %f at iter %d epoch %d' %(mx.nd.mean(errD).asscalar(), mx.nd.mean(errG).asscalar(), acc, iter, epoch))
if iter % 1 == 0:
visual('gout', fake.asnumpy(), name=os.path.join(outf,'fake_img_iter_%d.png' %iter))
visual('data', data.asnumpy(), name=os.path.join(outf,'real_img_iter_%d.png' %iter))
iter = iter + 1
btic = time.time()
name, acc = metric.get()
metric.reset()
logging.info('\nbinary training acc at epoch %d: %s=%f' % (epoch, name, acc))
logging.info('time: %f' % (time.time() - tic))
if check_point:
netG.save_params(os.path.join(outf,'generator_epoch_%d.params' %epoch))
netD.save_params(os.path.join(outf,'discriminator_epoch_%d.params' % epoch))
netG.save_params(os.path.join(outf, 'generator.params'))
netD.save_params(os.path.join(outf, 'discriminator.params'))
| apache-2.0 |
clemkoa/scikit-learn | examples/covariance/plot_outlier_detection.py | 15 | 5121 | """
==========================================
Outlier detection with several methods.
==========================================
When the amount of contamination is known, this example illustrates three
different ways of performing :ref:`outlier_detection`:
- based on a robust estimator of covariance, which assumes that the
data are Gaussian distributed and performs better than the One-Class SVM
in that case.
- using the One-Class SVM and its ability to capture the shape of the
data set, hence performing better when the data is strongly
non-Gaussian, i.e. with two well-separated clusters;
- using the Isolation Forest algorithm, which is based on random forests and
hence more adapted to large-dimensional settings, even if it performs
quite well in the examples below.
- using the Local Outlier Factor to measure the local deviation of a given
data point with respect to its neighbors by comparing their local density.
The ground truth about inliers and outliers is given by the points colors
while the orange-filled area indicates which points are reported as inliers
by each method.
Here, we assume that we know the fraction of outliers in the datasets.
Thus rather than using the 'predict' method of the objects, we set the
threshold on the decision_function to separate out the corresponding
fraction.
"""
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn import svm
from sklearn.covariance import EllipticEnvelope
from sklearn.ensemble import IsolationForest
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
rng = np.random.RandomState(42)
# Example settings
n_samples = 200
outliers_fraction = 0.25
clusters_separation = [0, 1, 2]
# define two outlier detection tools to be compared
classifiers = {
"One-Class SVM": svm.OneClassSVM(nu=0.95 * outliers_fraction + 0.05,
kernel="rbf", gamma=0.1),
"Robust covariance": EllipticEnvelope(contamination=outliers_fraction),
"Isolation Forest": IsolationForest(max_samples=n_samples,
contamination=outliers_fraction,
random_state=rng),
"Local Outlier Factor": LocalOutlierFactor(
n_neighbors=35,
contamination=outliers_fraction)}
# Compare given classifiers under given settings
xx, yy = np.meshgrid(np.linspace(-7, 7, 100), np.linspace(-7, 7, 100))
n_inliers = int((1. - outliers_fraction) * n_samples)
n_outliers = int(outliers_fraction * n_samples)
ground_truth = np.ones(n_samples, dtype=int)
ground_truth[-n_outliers:] = -1
# Fit the problem with varying cluster separation
for i, offset in enumerate(clusters_separation):
np.random.seed(42)
# Data generation
X1 = 0.3 * np.random.randn(n_inliers // 2, 2) - offset
X2 = 0.3 * np.random.randn(n_inliers // 2, 2) + offset
X = np.r_[X1, X2]
# Add outliers
X = np.r_[X, np.random.uniform(low=-6, high=6, size=(n_outliers, 2))]
# Fit the model
plt.figure(figsize=(9, 7))
for i, (clf_name, clf) in enumerate(classifiers.items()):
# fit the data and tag outliers
if clf_name == "Local Outlier Factor":
y_pred = clf.fit_predict(X)
scores_pred = clf.negative_outlier_factor_
else:
clf.fit(X)
scores_pred = clf.decision_function(X)
y_pred = clf.predict(X)
threshold = stats.scoreatpercentile(scores_pred,
100 * outliers_fraction)
n_errors = (y_pred != ground_truth).sum()
# plot the levels lines and the points
if clf_name == "Local Outlier Factor":
# decision_function is private for LOF
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
subplot = plt.subplot(2, 2, i + 1)
subplot.contourf(xx, yy, Z, levels=np.linspace(Z.min(), threshold, 7),
cmap=plt.cm.Blues_r)
a = subplot.contour(xx, yy, Z, levels=[threshold],
linewidths=2, colors='red')
subplot.contourf(xx, yy, Z, levels=[threshold, Z.max()],
colors='orange')
b = subplot.scatter(X[:-n_outliers, 0], X[:-n_outliers, 1], c='white',
s=20, edgecolor='k')
c = subplot.scatter(X[-n_outliers:, 0], X[-n_outliers:, 1], c='black',
s=20, edgecolor='k')
subplot.axis('tight')
subplot.legend(
[a.collections[0], b, c],
['learned decision function', 'true inliers', 'true outliers'],
prop=matplotlib.font_manager.FontProperties(size=10),
loc='lower right')
subplot.set_xlabel("%d. %s (errors: %d)" % (i + 1, clf_name, n_errors))
subplot.set_xlim((-7, 7))
subplot.set_ylim((-7, 7))
plt.subplots_adjust(0.04, 0.1, 0.96, 0.94, 0.1, 0.26)
plt.suptitle("Outlier detection")
plt.show()
| bsd-3-clause |
agutieda/QuantEcon.py | quantecon/tests/test_matrix_eqn.py | 7 | 1050 | """
Tests for quantecon.matrix_eqn
"""
from __future__ import division
from collections import Counter
import unittest
import numpy as np
from numpy.testing import assert_allclose
from nose.plugins.attrib import attr
import pandas as pd
from quantecon import matrix_eqn as qme
def test_solve_discrete_lyapunov_zero():
'Simple test where X is all zeros'
A = np.eye(4) * .95
B = np.zeros((4, 4))
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(X, np.zeros((4, 4)))
def test_solve_discrete_lyapunov_B():
'Simple test where X is same as B'
A = np.ones((2, 2)) * .5
B = np.array([[.5, -.5], [-.5, .5]])
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(B, X)
def test_solve_discrete_lyapunov_complex():
'Complex test, A is companion matrix'
A = np.array([[0.5 + 0.3j, 0.1 + 0.1j],
[ 1, 0]])
B = np.eye(2)
X = qme.solve_discrete_lyapunov(A, B)
assert_allclose(np.dot(np.dot(A, X), A.conj().transpose()) - X, -B,
atol=1e-15)
| bsd-3-clause |
manterd/myPhyloDB | functions/analysis/spls_graphs.py | 1 | 32640 | import datetime
from django.http import HttpResponse
import logging
import pandas as pd
from pyper import *
from scipy import stats
import json
from database.models import Kingdom, Phyla, Class, Order, Family, Genus, Species, OTU_99, \
ko_lvl1, ko_lvl2, ko_lvl3, \
nz_lvl1, nz_lvl2, nz_lvl3, nz_lvl4
import functions
reload(sys)
sys.setdefaultencoding('utf8')
LOG_FILENAME = 'error_log.txt'
pd.set_option('display.max_colwidth', -1)
def getSPLS(request, stops, RID, PID):
try:
while True:
if request.is_ajax():
allJson = request.body.split('&')[0]
all = json.loads(allJson)
functions.setBase(RID, 'Step 1 of 6: Reading normalized data file...')
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...')
selectAll = int(all["selectAll"])
keggAll = int(all["keggAll"])
nzAll = int(all["nzAll"])
# Select samples and meta-variables from savedDF
metaValsCat = []
metaIDsCat = []
metaValsQuant = all['metaValsQuant']
metaIDsQuant = all['metaIDsQuant']
treeType = int(all['treeType'])
DepVar = int(all["DepVar"])
# Create meta-variable DataFrame, final sample list, final category and quantitative field lists based on tree selections
savedDF, metaDF, finalSampleIDs, catFields, remCatFields, quantFields, catValues, quantValues = functions.getMetaDF(request.user, metaValsCat, metaIDsCat, metaValsQuant, metaIDsQuant, DepVar)
allFields = catFields + quantFields
print "ok"
if not finalSampleIDs:
error = "No valid samples were contained in your final dataset.\nPlease select different variable(s)."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
result = ''
if treeType == 1:
if selectAll == 1:
result += 'Taxa level: Kingdom' + '\n'
elif selectAll == 2:
result += 'Taxa level: Phyla' + '\n'
elif selectAll == 3:
result += 'Taxa level: Class' + '\n'
elif selectAll == 4:
result += 'Taxa level: Order' + '\n'
elif selectAll == 5:
result += 'Taxa level: Family' + '\n'
elif selectAll == 6:
result += 'Taxa level: Genus' + '\n'
elif selectAll == 7:
result += 'Taxa level: Species' + '\n'
elif selectAll == 9:
result += 'Taxa level: OTU_99' + '\n'
elif treeType == 2:
if keggAll == 1:
result += 'KEGG Pathway level: 1' + '\n'
elif keggAll == 2:
result += 'KEGG Pathway level: 2' + '\n'
elif keggAll == 3:
result += 'KEGG Pathway level: 3' + '\n'
elif treeType == 3:
if nzAll == 1:
result += 'KEGG Enzyme level: 1' + '\n'
elif nzAll == 2:
result += 'KEGG Enzyme level: 2' + '\n'
elif nzAll == 3:
result += 'KEGG Enzyme level: 3' + '\n'
elif nzAll == 4:
result += 'KEGG Enzyme level: 4' + '\n'
elif nzAll == 5:
result += 'KEGG Enzyme level: GIBBs' + '\n'
elif nzAll == 6:
result += 'KEGG Enzyme level: Nitrogen cycle' + '\n'
result += 'Categorical variables selected by user: ' + ", ".join(catFields + remCatFields) + '\n'
result += 'Categorical variables not included in the statistical analysis (contains only 1 level): ' + ", ".join(remCatFields) + '\n'
result += 'Quantitative variables selected by user: ' + ", ".join(quantFields) + '\n'
result += '===============================================\n\n'
x_scale = all['x_scale']
if x_scale == 'yes':
result += 'Predictor (X) variables have been scaled by dividing by their standard deviation.\n'
else:
result += 'Predictor (X) variables have not been scaled.\n'
y_scale = all['y_scale']
if y_scale == 'yes':
result += 'All response (Y) variables (i.e., observed & predicted) have been scaled by dividing by their standard deviation.\n'
else:
result += 'All response (Y) variables (i.e., observed & predicted) have not been scaled.\n'
result += '===============================================\n\n'
functions.setBase(RID, 'Step 2 of 6: Selecting your chosen meta-variables...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...')
# filter otus based on user settings
remUnclass = all['remUnclass']
remZeroes = all['remZeroes']
perZeroes = int(all['perZeroes'])
filterData = all['filterData']
filterPer = int(all['filterPer'])
filterMeth = int(all['filterMeth'])
remMito = all['remMito']
remChloro = all['remChloro']
mapTaxa = 'no'
finalDF = pd.DataFrame()
if treeType == 1:
if selectAll != 8:
filteredDF = functions.filterDF(savedDF, DepVar, selectAll, remUnclass, remMito, remChloro, remZeroes, perZeroes, filterData, filterPer, filterMeth)
else:
filteredDF = savedDF.copy()
finalDF, missingList = functions.getTaxaDF(selectAll, '', filteredDF, metaDF, allFields, DepVar, RID, stops, PID)
if selectAll == 8:
result += '\nThe following PGPRs were not detected: ' + ", ".join(missingList) + '\n'
result += '===============================================\n'
if treeType == 2:
finalDF, allDF = functions.getKeggDF(keggAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if treeType == 3:
finalDF, allDF = functions.getNZDF(nzAll, '', savedDF, metaDF, DepVar, mapTaxa, RID, stops, PID)
if finalDF.empty:
error = "Selected taxa were not found in your selected samples."
myDict = {'error': error}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
# make sure column types are correct
finalDF[quantFields] = finalDF[quantFields].astype(float)
# transform Y, if requested
transform = int(all["transform"])
finalDF = functions.transformDF(transform, DepVar, finalDF)
# save location info to session
myDir = 'myPhyloDB/media/temp/spls/'
if not os.path.exists(myDir):
os.makedirs(myDir)
path = str(myDir) + str(RID) + '.biom'
functions.imploding_panda(path, treeType, DepVar, finalSampleIDs, metaDF, finalDF)
functions.setBase(RID, 'Step 3 of 6: Selecting your chosen taxa or KEGG level...done')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...')
if DepVar == 0:
result += 'Dependent Variable: Abundance' + '\n'
elif DepVar == 1:
result += 'Dependent Variable: Relative Abundance' + '\n'
elif DepVar == 2:
result += 'Dependent Variable: OTU Richness' + '\n'
elif DepVar == 3:
result += 'Dependent Variable: OTU Diversity' + '\n'
elif DepVar == 4:
result += 'Dependent Variable: Total Abundance' + '\n'
result += '\n===============================================\n'
count_rDF = pd.DataFrame()
if DepVar == 0:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund')
elif DepVar == 1:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rel_abund')
elif DepVar == 2:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='rich')
elif DepVar == 3:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='diversity')
elif DepVar == 4:
count_rDF = finalDF.pivot(index='sampleid', columns='rank_id', values='abund_16S')
count_rDF.fillna(0, inplace=True)
if os.name == 'nt':
r = R(RCMD="R/R-Portable/App/R-Portable/bin/R.exe", use_pandas=True)
else:
r = R(RCMD="R/R-Linux/bin/R", use_pandas=True)
functions.setBase(RID, 'Verifying R packages...missing packages are being installed')
# R packages from cran
r("list.of.packages <- c('mixOmics', 'spls', 'pheatmap', 'RColorBrewer')")
r("new.packages <- list.of.packages[!(list.of.packages %in% installed.packages()[,'Package'])]")
print r("if (length(new.packages)) install.packages(new.packages, repos='http://cran.us.r-project.org', dependencies=T)")
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...')
print r("library(mixOmics)")
print r("library(spls)")
print r("library(pheatmap)")
print r("library(RColorBrewer)")
count_rDF.sort_index(axis=0, inplace=True)
metaDF.sort_values('sampleid', inplace=True)
r.assign("X", count_rDF)
r.assign("Y", metaDF[quantFields])
r.assign("names", count_rDF.columns.values)
r("colnames(X) <- names")
freqCut = all["freqCut"]
num = int(freqCut.split('/')[0])
den = int(freqCut.split('/')[1])
r.assign("num", num)
r.assign("den", den)
uniqueCut = int(all["uniqueCut"])
r.assign("uniqueCut", uniqueCut)
r("nzv_cols <- nearZeroVar(X, freqCut=num/den, uniqueCut=uniqueCut)")
r("if(length(nzv_cols$Position > 0)) X <- X[,-nzv_cols$Position]")
columns = r.get("ncol(X)")
if columns == 0:
myDict = {'error': "All predictor variables have zero variance.\nsPLS-Regr was aborted!"}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
if x_scale == 'yes':
r("X_scaled <- scale(X, center=FALSE, scale=TRUE)")
else:
r("X_scaled <- scale(X, center=FALSE, scale=FALSE)")
if y_scale == 'yes':
r("Y_scaled <- scale(Y, center=FALSE, scale=TRUE)")
else:
r("Y_scaled <- scale(Y, center=FALSE, scale=FALSE)")
r("detach('package:mixOmics', unload=TRUE)")
r("set.seed(1)")
r("maxK <- length(Y)")
spls_string = "cv <- cv.spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=seq(0.1, 0.9, 0.1), K=c(1:maxK), plot.it=FALSE)"
r.assign("cmd", spls_string)
r("eval(parse(text=cmd))")
r("f <- spls(X_scaled, Y_scaled, scale.x=FALSE, scale.y=FALSE, eta=cv$eta.opt, K=cv$K.opt)")
r("out <- capture.output(print(f))")
fout = r.get("out")
if fout is not None:
for i in fout:
result += str(i) + '\n'
else:
myDict = {'error': "Analysis did not converge.\nsPLS-Regr was aborted!"}
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
r("set.seed(1)")
r("ci.f <- ci.spls(f, plot.it=FALSE, plot.fix='y')")
r("cis <- ci.f$cibeta")
r("cf <- correct.spls(ci.f, plot.it=FALSE)")
r("out <- capture.output(cis)")
fout = r.get("out")
if fout is not None:
result += '\n\nBootstrapped confidence intervals of coefficients:\n'
for i in fout:
result += str(i) + '\n'
result += '\n===============================================\n'
r("coef.f <- coef(f)")
r("sum <- sum(coef.f != 0)")
total = r.get("sum")
functions.setBase(RID, 'Step 4 of 6: Calculating sPLS...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...')
finalDict = {}
if total is not None:
r("pred.f <- predict(f, type='fit')")
r("pred.f.rows <- row.names(pred.f)")
pred = r.get("pred.f")
rows = r.get("pred.f.rows")
predList = ['pred_' + s for s in quantFields]
predDF = pd.DataFrame(pred, columns=predList, index=rows)
meta_scaled = r.get("Y_scaled")
metaDF_scaled = pd.DataFrame(data=meta_scaled, columns=quantFields, index=rows)
resultDF = pd.merge(metaDF_scaled, predDF, left_index=True, right_index=True)
result += 'sPLS Model Fit (y = mx + b):\n'
result += 'y = predicted\n'
result += 'x = observed\n\n'
for i in xrange(len(quantFields)):
r.assign("myCol", quantFields[i])
x = r.get("Y_scaled[,myCol]")
x = x.tolist()
y = resultDF[predList[i]].astype(float).values.tolist()
slp, inter, r_value, p, se = stats.linregress(x, y)
r_sq = r_value * r_value
result += 'Variable: ' + str(quantFields[i]) + '\n'
result += 'Slope (m): ' + str(slp) + '\n'
result += 'Intercept (b): ' + str(inter) + '\n'
result += 'R2: ' + str(r_sq) + '\n'
result += 'Std Error: ' + str(se) + '\n\n\n'
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
r("coef.f.rows <- row.names(coef.f)")
cf = r.get("coef.f")
rows = r.get("coef.f.rows")
coeffsDF = pd.DataFrame(cf, columns=quantFields, index=rows)
coeffsDF = coeffsDF.loc[(coeffsDF != 0).any(axis=1)]
coeffsDF.sort_index(inplace=True)
taxIDList = coeffsDF.index.values.tolist()
namesDF = pd.DataFrame()
if treeType == 1:
if selectAll == 1:
taxNameList = Kingdom.objects.filter(kingdomid__in=taxIDList).values('kingdomid', 'kingdomName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'kingdomName': 'rank_name', 'kingdomid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 2:
taxNameList = Phyla.objects.filter(phylaid__in=taxIDList).values('phylaid', 'phylaName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'phylaName': 'rank_name', 'phylaid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 3:
taxNameList = Class.objects.filter(classid__in=taxIDList).values('classid', 'className')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'className': 'rank_name', 'classid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 4:
taxNameList = Order.objects.filter(orderid__in=taxIDList).values('orderid', 'orderName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'orderName': 'rank_name', 'orderid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 5:
taxNameList = Family.objects.filter(familyid__in=taxIDList).values('familyid', 'familyName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'familyName': 'rank_name', 'familyid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 6:
taxNameList = Genus.objects.filter(genusid__in=taxIDList).values('genusid', 'genusName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'genusName': 'rank_name', 'genusid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 7:
taxNameList = Species.objects.filter(speciesid__in=taxIDList).values('speciesid', 'speciesName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'speciesName': 'rank_name', 'speciesid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif selectAll == 9:
taxNameList = OTU_99.objects.filter(otuid__in=taxIDList).values('otuid', 'otuName')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'otuName': 'rank_name', 'otuid': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif treeType == 2:
if keggAll == 1:
taxNameList = ko_lvl1.objects.using('picrust').filter(ko_lvl1_id__in=taxIDList).values('ko_lvl1_id', 'ko_lvl1_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl1_name': 'rank_name', 'ko_lvl1_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif keggAll == 2:
taxNameList = ko_lvl2.objects.using('picrust').filter(ko_lvl2_id__in=taxIDList).values('ko_lvl2_id', 'ko_lvl2_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl2_name': 'rank_name', 'ko_lvl2_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif keggAll == 3:
taxNameList = ko_lvl3.objects.using('picrust').filter(ko_lvl3_id__in=taxIDList).values('ko_lvl3_id', 'ko_lvl3_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'ko_lvl3_name': 'rank_name', 'ko_lvl3_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif treeType == 3:
if nzAll == 1:
taxNameList = nz_lvl1.objects.using('picrust').filter(nz_lvl1_id__in=taxIDList).values('nz_lvl1_id', 'nz_lvl1_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl1_name': 'rank_name', 'nz_lvl1_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 2:
taxNameList = nz_lvl2.objects.using('picrust').filter(nz_lvl2_id__in=taxIDList).values('nz_lvl2_id', 'nz_lvl2_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl2_name': 'rank_name', 'nz_lvl2_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 3:
taxNameList = nz_lvl3.objects.using('picrust').filter(nz_lvl3_id__in=taxIDList).values('nz_lvl3_id', 'nz_lvl3_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl3_name': 'rank_name', 'nz_lvl3_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 4:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 5:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
elif nzAll == 6:
taxNameList = nz_lvl4.objects.using('picrust').filter(nz_lvl4_id__in=taxIDList).values('nz_lvl4_id', 'nz_lvl4_name')
namesDF = pd.DataFrame(list(taxNameList))
namesDF.rename(columns={'nz_lvl4_name': 'rank_name', 'nz_lvl4_id': 'rank_id'}, inplace=True)
namesDF.set_index('rank_id', inplace=True)
namesDF.sort_index(inplace=True)
taxNameList = namesDF['rank_name'].values.tolist()
if treeType == 2:
if keggAll > 1:
taxNameList[:] = (item[:20] + '...' if len(item) > 20 else item for item in taxNameList)
elif treeType == 3:
if nzAll > 1:
taxNameList[:] = (item.split()[0] for item in taxNameList)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
coeffsDF = pd.merge(namesDF, coeffsDF, left_index=True, right_index=True, how='inner')
coeffsDF.reset_index(inplace=True)
res_table = coeffsDF.to_html(classes="table display")
res_table = res_table.replace('border="1"', 'border="0"')
finalDict['res_table'] = str(res_table)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
resultDF.reset_index(inplace=True)
resultDF.rename(columns={'index': 'sampleid'}, inplace=True)
pred_table = resultDF.to_html(classes="table display")
pred_table = pred_table.replace('border="1"', 'border="0"')
finalDict['pred_table'] = str(pred_table)
functions.setBase(RID, 'Step 5 of 6: Formatting sPLS coefficient table...done')
functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...')
xAxisDict = {}
xAxisDict['categories'] = taxNameList
labelsDict = {}
labelsDict['rotation'] = 270
labelsDict['enabled'] = True
labelsDict['style'] = {'fontSize': '14px'}
xAxisDict['labels'] = labelsDict
xAxisDict['title'] = {'text': None}
xAxisDict['tickLength'] = 0
yAxisDict = {}
yAxisDict['categories'] = quantFields
yAxisDict['labels'] = {'style': {'fontSize': '14px'}}
yAxisDict['title'] = {'text': None}
seriesList = []
seriesDict = {}
seriesDict['borderWidth'] = '1'
row, col = coeffsDF.shape
dataList = []
for i in xrange(row):
for j in xrange(len(quantFields)):
val = round(coeffsDF[quantFields[j]].iloc[i], 5)
tup = (i, j, val)
obsList = list(tup)
dataList.append(obsList)
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
seriesDict['data'] = dataList
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
labelDict = {}
labelDict['enabled'] = True
labelDict['color'] = 'black'
labelDict['style'] = {'textShadow': 'none'}
seriesList.append(seriesDict)
finalDict['xAxis'] = xAxisDict
finalDict['yAxis'] = yAxisDict
finalDict['series'] = seriesList
# R clustered heatmap
clustDF = coeffsDF.drop('rank_id', axis=1)
row, col = clustDF.shape
method = all['methodVal']
metric = all['metricVal']
path = "myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf"
if os.path.exists(path):
os.remove(path)
if not os.path.exists('myPhyloDB/media/temp/spls/Rplots'):
os.makedirs('myPhyloDB/media/temp/spls/Rplots')
height = 2.5 + 0.2*row
width = 5 + 0.2*(col-1)
file = "pdf('myPhyloDB/media/temp/spls/Rplots/" + str(RID) + ".spls.pdf', height=" + str(height) + ", width=" + str(width) + ", onefile=FALSE)"
r.assign("cmd", file)
r("eval(parse(text=cmd))")
r.assign("df", clustDF[quantFields])
r("df <- as.matrix(df)")
r.assign("rows", taxNameList)
r("rownames(df) <- rows")
r("col.pal <- brewer.pal(9,'RdBu')")
if row > 2 and col > 3:
hmap_str = "pheatmap(df, fontsize=12, color=col.pal, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "', clustering_distance_cols='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row > 2 and col <= 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, clustering_method='" + str(method) + "', clustering_distance_rows='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row <= 2 and col > 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_row=FALSE, clustering_method='" + str(method) + "', clustering_distance_cols='" + str(metric) + "')"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
if row <= 2 and col <= 3:
hmap_str = "pheatmap(df, color=col.pal, cluster_col=FALSE, cluster_row=FALSE)"
r.assign("cmd", hmap_str)
r("eval(parse(text=cmd))")
r("dev.off()")
finalDict['text'] = result
functions.setBase(RID, 'Step 6 of 6: Formatting graph data for display...done!')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
if stops[PID] == RID:
res = ''
return HttpResponse(res, content_type='application/json')
# /\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//\ #
finalDict['error'] = 'none'
res = json.dumps(finalDict)
return HttpResponse(res, content_type='application/json')
except Exception as e:
if not stops[PID] == RID:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
myDate = "\nDate: " + str(datetime.datetime.now()) + "\n"
logging.exception(myDate)
myDict = {}
myDict['error'] = "There was an error during your analysis:\nError: " + str(e.message) + "\nTimestamp: " + str(datetime.datetime.now())
res = json.dumps(myDict)
return HttpResponse(res, content_type='application/json')
| gpl-3.0 |
zycdragonball/tensorflow | tensorflow/contrib/learn/python/learn/tests/dataframe/dataframe_test.py | 62 | 3753 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests of the DataFrame class."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python import learn
from tensorflow.contrib.learn.python.learn.tests.dataframe import mocks
from tensorflow.python.framework import dtypes
from tensorflow.python.platform import test
def setup_test_df():
"""Create a dataframe populated with some test columns."""
df = learn.DataFrame()
df["a"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor a", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
df["b"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor b", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out2")
df["c"] = learn.TransformedSeries(
[mocks.MockSeries("foobar", mocks.MockTensor("Tensor c", dtypes.int32))],
mocks.MockTwoOutputTransform("iue", "eui", "snt"), "out1")
return df
class DataFrameTest(test.TestCase):
"""Test of `DataFrame`."""
def test_create(self):
df = setup_test_df()
self.assertEqual(df.columns(), frozenset(["a", "b", "c"]))
def test_select_columns(self):
df = setup_test_df()
df2 = df.select_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["a", "c"]))
def test_exclude_columns(self):
df = setup_test_df()
df2 = df.exclude_columns(["a", "c"])
self.assertEqual(df2.columns(), frozenset(["b"]))
def test_get_item(self):
df = setup_test_df()
c1 = df["b"]
self.assertEqual(
mocks.MockTensor("Mock Tensor 2", dtypes.int32), c1.build())
def test_del_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
del df["b"]
self.assertEqual(2, len(df))
self.assertEqual(df.columns(), frozenset(["a", "c"]))
def test_set_item_column(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn",
mocks.MockTensor("Tensor ", dtypes.int32))
df["quack"] = col1
self.assertEqual(4, len(df))
col2 = df["quack"]
self.assertEqual(col1, col2)
def test_set_item_column_multi(self):
df = setup_test_df()
self.assertEqual(3, len(df))
col1 = mocks.MockSeries("QuackColumn", [])
col2 = mocks.MockSeries("MooColumn", [])
df["quack", "moo"] = [col1, col2]
self.assertEqual(5, len(df))
col3 = df["quack"]
self.assertEqual(col1, col3)
col4 = df["moo"]
self.assertEqual(col2, col4)
def test_set_item_pandas(self):
# TODO(jamieas)
pass
def test_set_item_numpy(self):
# TODO(jamieas)
pass
def test_build(self):
df = setup_test_df()
result = df.build()
expected = {
"a": mocks.MockTensor("Mock Tensor 1", dtypes.int32),
"b": mocks.MockTensor("Mock Tensor 2", dtypes.int32),
"c": mocks.MockTensor("Mock Tensor 1", dtypes.int32)
}
self.assertEqual(expected, result)
if __name__ == "__main__":
test.main()
| apache-2.0 |
idlead/scikit-learn | sklearn/neighbors/tests/test_approximate.py | 26 | 19053 | """
Testing for the approximate neighbor search using
Locality Sensitive Hashing Forest module
(sklearn.neighbors.LSHForest).
"""
# Author: Maheshakya Wijewardena, Joel Nothman
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
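# Editorial sketch (not part of the original test suite): a minimal usage
# example of the estimator exercised below.  LSHForest is fitted like any
# other nearest-neighbours estimator and queried with kneighbors(); the toy
# data and parameter values here are illustrative assumptions only.
def _example_lshforest_usage():
    rng = np.random.RandomState(0)
    X_train = rng.rand(20, 5)
    X_query = rng.rand(2, 5)
    lshf = LSHForest(n_estimators=10, n_candidates=50, random_state=0)
    lshf.fit(X_train)
    # distances are cosine distances; indices point into X_train
    distances, indices = lshf.kneighbors(X_query, n_neighbors=3)
    return distances, indices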
def test_neighbors_accuracy_with_n_candidates():
# Checks whether accuracy increases as `n_candidates` increases.
n_candidates_values = np.array([.1, 50, 500])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_candidates_values.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, n_candidates in enumerate(n_candidates_values):
lshf = LSHForest(n_candidates=n_candidates)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as `n_candidates` increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
def test_neighbors_accuracy_with_n_estimators():
# Checks whether accuracy increases as `n_estimators` increases.
n_estimators = np.array([1, 10, 100])
n_samples = 100
n_features = 10
n_iter = 10
n_points = 5
rng = np.random.RandomState(42)
accuracies = np.zeros(n_estimators.shape[0], dtype=float)
X = rng.rand(n_samples, n_features)
for i, t in enumerate(n_estimators):
lshf = LSHForest(n_candidates=500, n_estimators=t)
ignore_warnings(lshf.fit)(X)
for j in range(n_iter):
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_points,
return_distance=False)
distances = pairwise_distances(query, X, metric='cosine')
ranks = np.argsort(distances)[0, :n_points]
intersection = np.intersect1d(ranks, neighbors).shape[0]
ratio = intersection / float(n_points)
accuracies[i] = accuracies[i] + ratio
accuracies[i] = accuracies[i] / float(n_iter)
    # Accuracies should be non-decreasing as `n_estimators` increases
assert_true(np.all(np.diff(accuracies) >= 0),
msg="Accuracies are not non-decreasing.")
# Highest accuracy should be strictly greater than the lowest
assert_true(np.ptp(accuracies) > 0,
msg="Highest accuracy is not strictly greater than lowest.")
@ignore_warnings
def test_kneighbors():
# Checks whether desired number of neighbors are returned.
# It is guaranteed to return the requested number of neighbors
# if `min_hash_match` is set to 0. Returned distances should be
# in ascending order.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
# Test unfitted estimator
assert_raises(ValueError, lshf.kneighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
neighbors = lshf.kneighbors(query, n_neighbors=n_neighbors,
return_distance=False)
# Desired number of neighbors should be returned.
assert_equal(neighbors.shape[1], n_neighbors)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.kneighbors(queries,
n_neighbors=1,
return_distance=True)
assert_equal(neighbors.shape[0], n_queries)
assert_equal(distances.shape[0], n_queries)
# Test only neighbors
neighbors = lshf.kneighbors(queries, n_neighbors=1,
return_distance=False)
assert_equal(neighbors.shape[0], n_queries)
# Test random point(not in the data set)
query = rng.randn(n_features).reshape(1, -1)
lshf.kneighbors(query, n_neighbors=1,
return_distance=False)
# Test n_neighbors at initialization
neighbors = lshf.kneighbors(query, return_distance=False)
assert_equal(neighbors.shape[1], 5)
# Test `neighbors` has an integer dtype
assert_true(neighbors.dtype.kind == 'i',
msg="neighbors are not in integer dtype.")
def test_radius_neighbors():
    # Checks whether the returned distances are less than `radius`.
    # At least one point should be returned when `radius` is set to the
    # mean distance from the query point to the other points in the
    # database.
# Moreover, this test compares the radius neighbors of LSHForest
# with the `sklearn.neighbors.NearestNeighbors`.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
# Test unfitted estimator
assert_raises(ValueError, lshf.radius_neighbors, X[0])
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
# Select a random point in the dataset as the query
query = X[rng.randint(0, n_samples)].reshape(1, -1)
# At least one neighbor should be returned when the radius is the
# mean distance from the query to the points of the dataset.
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
neighbors = lshf.radius_neighbors(query, radius=mean_dist,
return_distance=False)
assert_equal(neighbors.shape, (1,))
assert_equal(neighbors.dtype, object)
assert_greater(neighbors[0].shape[0], 0)
# All distances to points in the results of the radius query should
# be less than mean_dist
distances, neighbors = lshf.radius_neighbors(query,
radius=mean_dist,
return_distance=True)
assert_array_less(distances[0], mean_dist)
# Multiple points
n_queries = 5
queries = X[rng.randint(0, n_samples, n_queries)]
distances, neighbors = lshf.radius_neighbors(queries,
return_distance=True)
# dists and inds should not be 1D arrays or arrays of variable lengths
# hence the use of the object dtype.
assert_equal(distances.shape, (n_queries,))
assert_equal(distances.dtype, object)
assert_equal(neighbors.shape, (n_queries,))
assert_equal(neighbors.dtype, object)
# Compare with exact neighbor search
query = X[rng.randint(0, n_samples)].reshape(1, -1)
mean_dist = np.mean(pairwise_distances(query, X, metric='cosine'))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
distances_exact, _ = nbrs.radius_neighbors(query, radius=mean_dist)
distances_approx, _ = lshf.radius_neighbors(query, radius=mean_dist)
# Radius-based queries do not sort the result points and the order
# depends on the method, the random_state and the dataset order. Therefore
# we need to sort the results ourselves before performing any comparison.
sorted_dists_exact = np.sort(distances_exact[0])
sorted_dists_approx = np.sort(distances_approx[0])
# Distances to exact neighbors are less than or equal to approximate
# counterparts as the approximate radius query might have missed some
# closer neighbors.
assert_true(np.all(np.less_equal(sorted_dists_exact,
sorted_dists_approx)))
@ignore_warnings
def test_radius_neighbors_boundary_handling():
X = [[0.999, 0.001], [0.5, 0.5], [0, 1.], [-1., 0.001]]
n_points = len(X)
# Build an exact nearest neighbors model as reference model to ensure
# consistency between exact and approximate methods
nnbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
# Build a LSHForest model with hyperparameter values that always guarantee
# exact results on this toy dataset.
lsfh = LSHForest(min_hash_match=0, n_candidates=n_points).fit(X)
# define a query aligned with the first axis
query = [[1., 0.]]
# Compute the exact cosine distances of the query to the four points of
# the dataset
dists = pairwise_distances(query, X, metric='cosine').ravel()
# The first point is almost aligned with the query (very small angle),
# the cosine distance should therefore be almost null:
assert_almost_equal(dists[0], 0, decimal=5)
    # The second point forms an angle of 45 degrees with the query vector
assert_almost_equal(dists[1], 1 - np.cos(np.pi / 4))
    # The third point is orthogonal to the query vector, hence at a distance
    # of exactly one:
assert_almost_equal(dists[2], 1)
    # The last point is almost collinear with the query but with opposite
    # sign, therefore it has a cosine 'distance' very close to the maximum
    # possible value of 2.
assert_almost_equal(dists[3], 2, decimal=5)
# If we query with a radius of one, all the samples except the last sample
# should be included in the results. This means that the third sample
# is lying on the boundary of the radius query:
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1)
assert_array_equal(np.sort(exact_idx[0]), [0, 1, 2])
assert_array_equal(np.sort(approx_idx[0]), [0, 1, 2])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-1])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-1])
    # If we perform the same query with a slightly lower radius, the third
# point of the dataset that lay on the boundary of the previous query
# is now rejected:
eps = np.finfo(np.float64).eps
exact_dists, exact_idx = nnbrs.radius_neighbors(query, radius=1 - eps)
approx_dists, approx_idx = lsfh.radius_neighbors(query, radius=1 - eps)
assert_array_equal(np.sort(exact_idx[0]), [0, 1])
assert_array_equal(np.sort(approx_idx[0]), [0, 1])
assert_array_almost_equal(np.sort(exact_dists[0]), dists[:-2])
assert_array_almost_equal(np.sort(approx_dists[0]), dists[:-2])
def test_distances():
# Checks whether returned neighbors are from closest to farthest.
n_samples = 12
n_features = 2
n_iter = 10
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest()
ignore_warnings(lshf.fit)(X)
for i in range(n_iter):
n_neighbors = rng.randint(0, n_samples)
query = X[rng.randint(0, n_samples)].reshape(1, -1)
distances, neighbors = lshf.kneighbors(query,
n_neighbors=n_neighbors,
return_distance=True)
# Returned neighbors should be from closest to farthest, that is
# increasing distance values.
assert_true(np.all(np.diff(distances[0]) >= 0))
# Note: the radius_neighbors method does not guarantee the order of
# the results.
def test_fit():
# Checks whether `fit` method sets all attribute values correctly.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators)
ignore_warnings(lshf.fit)(X)
# _input_array = X
assert_array_equal(X, lshf._fit_X)
# A hash function g(p) for each tree
assert_equal(n_estimators, len(lshf.hash_functions_))
# Hash length = 32
assert_equal(32, lshf.hash_functions_[0].components_.shape[0])
# Number of trees_ in the forest
assert_equal(n_estimators, len(lshf.trees_))
# Each tree has entries for every data point
assert_equal(n_samples, len(lshf.trees_[0]))
# Original indices after sorting the hashes
assert_equal(n_estimators, len(lshf.original_indices_))
# Each set of original indices in a tree has entries for every data point
assert_equal(n_samples, len(lshf.original_indices_[0]))
def test_partial_fit():
    # Checks whether the inserted array is consistent with the fitted data.
# `partial_fit` method should set all attribute values correctly.
n_samples = 12
n_samples_partial_fit = 3
n_features = 2
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
X_partial_fit = rng.rand(n_samples_partial_fit, n_features)
lshf = LSHForest()
# Test unfitted estimator
ignore_warnings(lshf.partial_fit)(X)
assert_array_equal(X, lshf._fit_X)
ignore_warnings(lshf.fit)(X)
# Insert wrong dimension
assert_raises(ValueError, lshf.partial_fit,
np.random.randn(n_samples_partial_fit, n_features - 1))
ignore_warnings(lshf.partial_fit)(X_partial_fit)
    # _fit_X should grow by n_samples_partial_fit rows after insertion
assert_equal(lshf._fit_X.shape[0],
n_samples + n_samples_partial_fit)
    # each tree's original_indices_ should cover all inserted samples
assert_equal(len(lshf.original_indices_[0]),
n_samples + n_samples_partial_fit)
    # each tree should have an entry for every inserted sample
assert_equal(len(lshf.trees_[1]),
n_samples + n_samples_partial_fit)
def test_hash_functions():
# Checks randomness of hash functions.
# Variance and mean of each hash function (projection vector)
    # should be different from those of the flattened array of all hash functions.
# If hash functions are not randomly built (seeded with
# same value), variances and means of all functions are equal.
n_samples = 12
n_features = 2
n_estimators = 5
rng = np.random.RandomState(42)
X = rng.rand(n_samples, n_features)
lshf = LSHForest(n_estimators=n_estimators,
random_state=rng.randint(0, np.iinfo(np.int32).max))
ignore_warnings(lshf.fit)(X)
hash_functions = []
for i in range(n_estimators):
hash_functions.append(lshf.hash_functions_[i].components_)
for i in range(n_estimators):
assert_not_equal(np.var(hash_functions),
np.var(lshf.hash_functions_[i].components_))
for i in range(n_estimators):
assert_not_equal(np.mean(hash_functions),
np.mean(lshf.hash_functions_[i].components_))
def test_candidates():
# Checks whether candidates are sufficient.
# This should handle the cases when number of candidates is 0.
# User should be warned when number of candidates is less than
# requested number of neighbors.
X_train = np.array([[5, 5, 2], [21, 5, 5], [1, 1, 1], [8, 9, 1],
[6, 10, 2]], dtype=np.float32)
X_test = np.array([7, 10, 3], dtype=np.float32).reshape(1, -1)
# For zero candidates
lshf = LSHForest(min_hash_match=32)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (3, 32))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=3)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=3)
assert_equal(distances.shape[1], 3)
# For candidates less than n_neighbors
lshf = LSHForest(min_hash_match=31)
ignore_warnings(lshf.fit)(X_train)
message = ("Number of candidates is not sufficient to retrieve"
" %i neighbors with"
" min_hash_match = %i. Candidates are filled up"
" uniformly from unselected"
" indices." % (5, 31))
assert_warns_message(UserWarning, message, lshf.kneighbors,
X_test, n_neighbors=5)
distances, neighbors = lshf.kneighbors(X_test, n_neighbors=5)
assert_equal(distances.shape[1], 5)
def test_graphs():
# Smoke tests for graph methods.
n_samples_sizes = [5, 10, 20]
n_features = 3
rng = np.random.RandomState(42)
for n_samples in n_samples_sizes:
X = rng.rand(n_samples, n_features)
lshf = LSHForest(min_hash_match=0)
ignore_warnings(lshf.fit)(X)
kneighbors_graph = lshf.kneighbors_graph(X)
radius_neighbors_graph = lshf.radius_neighbors_graph(X)
assert_equal(kneighbors_graph.shape[0], n_samples)
assert_equal(kneighbors_graph.shape[1], n_samples)
assert_equal(radius_neighbors_graph.shape[0], n_samples)
assert_equal(radius_neighbors_graph.shape[1], n_samples)
def test_sparse_input():
# note: Fixed random state in sp.rand is not supported in older scipy.
# The test should succeed regardless.
X1 = sp.rand(50, 100)
X2 = sp.rand(10, 100)
forest_sparse = LSHForest(radius=1, random_state=0).fit(X1)
forest_dense = LSHForest(radius=1, random_state=0).fit(X1.A)
d_sparse, i_sparse = forest_sparse.kneighbors(X2, return_distance=True)
d_dense, i_dense = forest_dense.kneighbors(X2.A, return_distance=True)
assert_almost_equal(d_sparse, d_dense)
assert_almost_equal(i_sparse, i_dense)
d_sparse, i_sparse = forest_sparse.radius_neighbors(X2,
return_distance=True)
d_dense, i_dense = forest_dense.radius_neighbors(X2.A,
return_distance=True)
assert_equal(d_sparse.shape, d_dense.shape)
for a, b in zip(d_sparse, d_dense):
assert_almost_equal(a, b)
for a, b in zip(i_sparse, i_dense):
assert_almost_equal(a, b)
| bsd-3-clause |
Vimos/scikit-learn | examples/svm/plot_separating_hyperplane_unbalanced.py | 25 | 1866 | """
=================================================
SVM: Separating hyperplane for unbalanced classes
=================================================
Find the optimal separating hyperplane using an SVC for classes that
are unbalanced.
We first find the separating plane with a plain SVC and then plot
(dashed) the separating hyperplane with automatic correction for
unbalanced classes.
.. currentmodule:: sklearn.linear_model
.. note::
This example will also work by replacing ``SVC(kernel="linear")``
with ``SGDClassifier(loss="hinge")``. Setting the ``loss`` parameter
of the :class:`SGDClassifier` equal to ``hinge`` will yield behaviour
    similar to that of an SVC with a linear kernel.
For example try instead of the ``SVC``::
clf = SGDClassifier(n_iter=100, alpha=0.01)
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#from sklearn.linear_model import SGDClassifier
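# Editorial sketch of the SGDClassifier variant suggested in the note above;
# the hyper-parameter values are illustrative assumptions, not tuned, and the
# class_weight mapping mirrors the weighted SVC defined further below.
def _sgd_variant():
    from sklearn.linear_model import SGDClassifier
    clf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01)
    wclf = SGDClassifier(loss="hinge", n_iter=100, alpha=0.01,
                         class_weight={1: 10})
    return clf, wclf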
# we create clusters with 1000 and 100 points
rng = np.random.RandomState(0)
n_samples_1 = 1000
n_samples_2 = 100
X = np.r_[1.5 * rng.randn(n_samples_1, 2),
0.5 * rng.randn(n_samples_2, 2) + [2, 2]]
y = [0] * (n_samples_1) + [1] * (n_samples_2)
# fit the model and get the separating hyperplane
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, y)
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - clf.intercept_[0] / w[1]
# get the separating hyperplane using weighted classes
wclf = svm.SVC(kernel='linear', class_weight={1: 10})
wclf.fit(X, y)
ww = wclf.coef_[0]
wa = -ww[0] / ww[1]
wyy = wa * xx - wclf.intercept_[0] / ww[1]
# plot separating hyperplanes and samples
h0 = plt.plot(xx, yy, 'k-', label='no weights')
h1 = plt.plot(xx, wyy, 'k--', label='with weights')
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired, edgecolors='k')
plt.legend()
plt.axis('tight')
plt.show()
| bsd-3-clause |
spbguru/repo1 | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_emf.py | 69 | 22336 | """
Enhanced Metafile backend. See http://pyemf.sourceforge.net for the EMF
driver library.
"""
from __future__ import division
try:
import pyemf
except ImportError:
raise ImportError('You must first install pyemf from http://pyemf.sf.net')
import os,sys,math,re
from matplotlib import verbose, __version__, rcParams
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase
from matplotlib.figure import Figure
from matplotlib.transforms import Bbox
from matplotlib.font_manager import findfont, FontProperties
from matplotlib.ft2font import FT2Font, KERNING_UNFITTED, KERNING_DEFAULT, KERNING_UNSCALED
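# Editorial usage sketch (an assumption, not part of this module): with pyemf
# installed, this backend is normally selected before pyplot is imported and a
# figure is then written through FigureCanvasEMF.print_emf via savefig().  The
# backend name 'EMF' and the '.emf' suffix follow the filetypes mapping defined
# further below.
def _example_save_emf(path='example.emf'):
    import matplotlib
    matplotlib.use('EMF')              # select this backend before importing pyplot
    import matplotlib.pyplot as plt
    fig = plt.figure()
    plt.plot([0, 1, 2], [0, 1, 4])
    fig.savefig(path, dpi=300)         # dispatched to FigureCanvasEMF.print_emf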
# Font handling stuff snarfed from backend_ps, but only using TTF fonts
_fontd = {}
# Debug print stuff
debugHandle = False
debugPrint = False
debugText = False
# Hashable font properties class. In EMF, angle of rotation is a part
# of the font properties, so a handle to a new font must be obtained
# if the rotation changes.
class EMFFontProperties(FontProperties):
def __init__(self,other,angle):
FontProperties.__init__(self,other.get_family(),
other.get_style(),
other.get_variant(),
other.get_weight(),
other.get_stretch(),
other.get_size())
self.__angle=angle
def __hash__(self):
return hash( (FontProperties.__hash__(self), self.__angle))
def __str__(self):
return str( (FontProperties.__str__(self), self.__angle))
def set_angle(self,angle):
self.__angle=angle
# Hashable pen (line style) properties.
class EMFPen:
def __init__(self,emf,gc):
self.emf=emf
self.gc=gc
r,g,b=gc.get_rgb()
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
self.width=int(gc.get_linewidth())
self.style=0
self.set_linestyle()
if debugHandle: print "EMFPen: style=%d width=%d rgb=(%d,%d,%d)" % (self.style,self.width,self.r,self.g,self.b)
def __hash__(self):
return hash((self.style,self.width,self.r,self.g,self.b))
def set_linestyle(self):
# Hack. Negative width lines will not get drawn.
if self.width<0:
self.style=pyemf.PS_NULL
else:
styles={'solid':pyemf.PS_SOLID, 'dashed':pyemf.PS_DASH,
'dashdot':pyemf.PS_DASHDOT, 'dotted':pyemf.PS_DOT}
#style=styles.get(self.gc.get_linestyle('solid'))
style=self.gc.get_linestyle('solid')
if debugHandle: print "EMFPen: style=%d" % style
if style in styles:
self.style=styles[style]
else:
self.style=pyemf.PS_SOLID
def get_handle(self):
handle=self.emf.CreatePen(self.style,self.width,(self.r,self.g,self.b))
return handle
# Hashable brush (fill style) properties.
class EMFBrush:
def __init__(self,emf,rgb):
self.emf=emf
r,g,b=rgb
self.r=int(r*255)
self.g=int(g*255)
self.b=int(b*255)
if debugHandle: print "EMFBrush: rgb=(%d,%d,%d)" % (self.r,self.g,self.b)
def __hash__(self):
return hash((self.r,self.g,self.b))
def get_handle(self):
handle=self.emf.CreateSolidBrush((self.r,self.g,self.b))
return handle
class RendererEMF(RendererBase):
"""
The renderer handles drawing/rendering operations through a
pyemf.EMF instance.
"""
def __init__(self, outfile, width, height, dpi):
"Initialize the renderer with a gd image instance"
self.outfile = outfile
# a map from get_color args to colors
self._cached = {}
# dict of hashed properties to already created font handles
self._fontHandle = {}
self.lastHandle = {'font':-1, 'pen':-1, 'brush':-1}
self.emf=pyemf.EMF(width,height,dpi,'in')
self.width=int(width*dpi)
self.height=int(height*dpi)
self.dpi = dpi
self.pointstodpi = dpi/72.0
self.hackPointsForMathExponent = 2.0
# set background transparent for text
self.emf.SetBkMode(pyemf.TRANSPARENT)
# set baseline for text to be bottom left corner
self.emf.SetTextAlign( pyemf.TA_BOTTOM|pyemf.TA_LEFT)
if debugPrint: print "RendererEMF: (%f,%f) %s dpi=%f" % (self.width,self.height,outfile,dpi)
def save(self):
self.emf.save(self.outfile)
def draw_arc(self, gcEdge, rgbFace, x, y, width, height, angle1, angle2, rotation):
"""
Draw an arc using GraphicsContext instance gcEdge, centered at x,y,
with width and height and angles from 0.0 to 360.0
0 degrees is at 3-o'clock
positive angles are anti-clockwise
If the color rgbFace is not None, fill the arc with it.
"""
if debugPrint: print "draw_arc: (%f,%f) angles=(%f,%f) w,h=(%f,%f)" % (x,y,angle1,angle2,width,height)
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
# This algorithm doesn't work very well on small circles
# because of rounding error. This shows up most obviously on
# legends where the circles are small anyway, and it is
# compounded by the fact that it puts several circles right
# next to each other so the differences are obvious.
hw=width/2
hh=height/2
x1=int(x-width/2)
y1=int(y-height/2)
if brush:
self.emf.Pie(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
else:
self.emf.Arc(int(x-hw),int(self.height-(y-hh)),int(x+hw),int(self.height-(y+hh)),int(x+math.cos(angle1*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle1*math.pi/180.0)*hh)),int(x+math.cos(angle2*math.pi/180.0)*hw),int(self.height-(y+math.sin(angle2*math.pi/180.0)*hh)))
def draw_image(self, x, y, im, bbox):
"""
Draw the Image instance into the current axes; x is the
distance in pixels from the left hand side of the canvas. y is
the distance from the origin. That is, if origin is upper, y
is the distance from top. If origin is lower, y is the
distance from bottom
bbox is a matplotlib.transforms.BBox instance for clipping, or
None
"""
# pyemf2 currently doesn't support bitmaps.
pass
def draw_line(self, gc, x1, y1, x2, y2):
"""
Draw a single line from x1,y1 to x2,y2
"""
if debugPrint: print "draw_line: (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
if self.select_pen(gc):
self.emf.Polyline([(long(x1),long(self.height-y1)),(long(x2),long(self.height-y2))])
else:
if debugPrint: print "draw_line: optimizing away (%f,%f) - (%f,%f)" % (x1,y1,x2,y2)
def draw_lines(self, gc, x, y):
"""
x and y are equal length arrays, draw lines connecting each
point in x, y
"""
if debugPrint: print "draw_lines: %d points" % len(str(x))
# optimize away anything that won't actually be drawn. Edge
# style must not be PS_NULL for it to appear on screen.
if self.select_pen(gc):
points = [(long(x[i]), long(self.height-y[i])) for i in range(len(x))]
self.emf.Polyline(points)
def draw_point(self, gc, x, y):
"""
Draw a single point at x,y
Where 'point' is a device-unit point (or pixel), not a matplotlib point
"""
if debugPrint: print "draw_point: (%f,%f)" % (x,y)
# don't cache this pen
pen=EMFPen(self.emf,gc)
self.emf.SetPixel(long(x),long(self.height-y),(pen.r,pen.g,pen.b))
def draw_polygon(self, gcEdge, rgbFace, points):
"""
Draw a polygon using the GraphicsContext instance gc.
        points is a length-N sequence of vertices, each element
        giving the x,y coords of a vertex
If the color rgbFace is not None, fill the polygon with it
"""
if debugPrint: print "draw_polygon: %d points" % len(points)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
points = [(long(x), long(self.height-y)) for x,y in points]
self.emf.Polygon(points)
else:
points = [(long(x), long(self.height-y)) for x,y in points]
if debugPrint: print "draw_polygon: optimizing away polygon: %d points = %s" % (len(points),str(points))
def draw_rectangle(self, gcEdge, rgbFace, x, y, width, height):
"""
Draw a non-filled rectangle using the GraphicsContext instance gcEdge,
with lower left at x,y with width and height.
If rgbFace is not None, fill the rectangle with it.
"""
if debugPrint: print "draw_rectangle: (%f,%f) w=%f,h=%f" % (x,y,width,height)
# optimize away anything that won't actually draw. Either a
# face color or edge style must be defined
pen=self.select_pen(gcEdge)
brush=self.select_brush(rgbFace)
if pen or brush:
self.emf.Rectangle(int(x),int(self.height-y),int(x)+int(width),int(self.height-y)-int(height))
else:
if debugPrint: print "draw_rectangle: optimizing away (%f,%f) w=%f,h=%f" % (x,y,width,height)
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
"""
Draw the text.Text instance s at x,y (display coords) with font
properties instance prop at angle in degrees, using GraphicsContext gc
**backend implementers note**
When you are trying to determine if you have gotten your bounding box
right (which is what enables the text layout/alignment to work
properly), it helps to change the line in text.py
if 0: bbox_artist(self, renderer)
to if 1, and then the actual bounding box will be blotted along with
your text.
"""
if debugText: print "draw_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if ismath:
self.draw_math_text(gc,x,y,s,prop,angle)
else:
self.draw_plain_text(gc,x,y,s,prop,angle)
def draw_plain_text(self, gc, x, y, s, prop, angle):
"""
Draw a text string verbatim; no conversion is done.
"""
if debugText: print "draw_plain_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
if debugText: print " properties:\n"+str(prop)
self.select_font(prop,angle)
# haxor follows! The subtleties of text placement in EMF
# still elude me a bit. It always seems to be too high on the
# page, about 10 pixels too high on a 300dpi resolution image.
# So, I'm adding this hack for the moment:
hackoffsetper300dpi=10
xhack=math.sin(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
yhack=math.cos(angle*math.pi/180.0)*hackoffsetper300dpi*self.dpi/300.0
self.emf.TextOut(long(x+xhack),long(y+yhack),s)
def draw_math_text(self, gc, x, y, s, prop, angle):
"""
Draw a subset of TeX, currently handles exponents only. Since
pyemf doesn't have any raster functionality yet, the
texmanager.get_rgba won't help.
"""
if debugText: print "draw_math_text: (%f,%f) %d degrees: '%s'" % (x,y,angle,s)
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
self.draw_plain_text(gc,x,y,"10",prop,angle)
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
self.draw_plain_text(gc,x+w+self.points_to_pixels(self.hackPointsForMathExponent),y-(h/2),exp,propexp,angle)
else:
# if it isn't an exponent, then render the raw TeX string.
self.draw_plain_text(gc,x,y,s,prop,angle)
def get_math_text_width_height(self, s, prop):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps. This
method must be kept in sync with draw_math_text.
"""
if debugText: print "get_math_text_width_height:"
s = s[1:-1] # strip the $ from front and back
match=re.match("10\^\{(.+)\}",s)
if match:
exp=match.group(1)
if debugText: print " exponent=%s" % exp
font = self._get_font_ttf(prop)
font.set_text("10", 0.0)
w1, h1 = font.get_width_height()
propexp=prop.copy()
propexp.set_size(prop.get_size_in_points()*.8)
fontexp=self._get_font_ttf(propexp)
fontexp.set_text(exp, 0.0)
w2, h2 = fontexp.get_width_height()
w=w1+w2
h=h1+(h2/2)
w /= 64.0 # convert from subpixels
h /= 64.0
w+=self.points_to_pixels(self.hackPointsForMathExponent)
if debugText: print " math string=%s w,h=(%f,%f)" % (s, w, h)
else:
w,h=self.get_text_width_height(s,prop,False)
return w, h
def flipy(self):
"""return true if y small numbers are top for renderer
Is used for drawing text (text.py) and images (image.py) only
"""
return True
def get_canvas_width_height(self):
"""
return the canvas width and height in display coords
"""
return self.width,self.height
def set_handle(self,type,handle):
"""
Update the EMF file with the current handle, but only if it
isn't the same as the last one. Don't want to flood the file
with duplicate info.
"""
if self.lastHandle[type] != handle:
self.emf.SelectObject(handle)
self.lastHandle[type]=handle
def get_font_handle(self, prop, angle):
"""
Look up the handle for the font based on the dict of
properties *and* the rotation angle, since in EMF the font
rotation is a part of the font definition.
"""
prop=EMFFontProperties(prop,angle)
size=int(prop.get_size_in_points()*self.pointstodpi)
face=prop.get_name()
key = hash(prop)
handle = self._fontHandle.get(key)
if handle is None:
handle=self.emf.CreateFont(-size, 0, int(angle)*10, int(angle)*10,
pyemf.FW_NORMAL, 0, 0, 0,
pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, face);
if debugHandle: print "get_font_handle: creating handle=%d for face=%s size=%d" % (handle,face,size)
self._fontHandle[key]=handle
if debugHandle: print " found font handle %d for face=%s size=%d" % (handle,face,size)
self.set_handle("font",handle)
return handle
def select_font(self,prop,angle):
handle=self.get_font_handle(prop,angle)
self.set_handle("font",handle)
def select_pen(self, gc):
"""
Select a pen that includes the color, line width and line
style. Return the pen if it will draw a line, or None if the
pen won't produce any output (i.e. the style is PS_NULL)
"""
pen=EMFPen(self.emf,gc)
key=hash(pen)
handle=self._fontHandle.get(key)
if handle is None:
handle=pen.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found pen handle %d" % handle
self.set_handle("pen",handle)
if pen.style != pyemf.PS_NULL:
return pen
else:
return None
def select_brush(self, rgb):
"""
Select a fill color, and return the brush if the color is
valid or None if this won't produce a fill operation.
"""
if rgb is not None:
brush=EMFBrush(self.emf,rgb)
key=hash(brush)
handle=self._fontHandle.get(key)
if handle is None:
handle=brush.get_handle()
self._fontHandle[key]=handle
if debugHandle: print " found brush handle %d" % handle
self.set_handle("brush",handle)
return brush
else:
return None
def _get_font_ttf(self, prop):
"""
get the true type font properties, used because EMFs on
windows will use true type fonts.
"""
key = hash(prop)
font = _fontd.get(key)
if font is None:
fname = findfont(prop)
if debugText: print "_get_font_ttf: name=%s" % fname
font = FT2Font(str(fname))
_fontd[key] = font
font.clear()
size = prop.get_size_in_points()
font.set_size(size, self.dpi)
return font
def get_text_width_height(self, s, prop, ismath):
"""
get the width and height in display coords of the string s
        with FontProperties prop, ripped right out of backend_ps
"""
if debugText: print "get_text_width_height: ismath=%s properties: %s" % (str(ismath),str(prop))
if ismath:
if debugText: print " MATH TEXT! = %s" % str(ismath)
w,h = self.get_math_text_width_height(s, prop)
return w,h
font = self._get_font_ttf(prop)
font.set_text(s, 0.0)
w, h = font.get_width_height()
w /= 64.0 # convert from subpixels
h /= 64.0
if debugText: print " text string=%s w,h=(%f,%f)" % (s, w, h)
return w, h
def new_gc(self):
return GraphicsContextEMF()
def points_to_pixels(self, points):
# if backend doesn't have dpi, eg, postscript or svg
#return points
# elif backend assumes a value for pixels_per_inch
#return points/72.0 * self.dpi.get() * pixels_per_inch/72.0
# else
return points/72.0 * self.dpi
class GraphicsContextEMF(GraphicsContextBase):
"""
The graphics context provides the color, line styles, etc... See the gtk
and postscript backends for examples of mapping the graphics context
attributes (cap styles, join styles, line widths, colors) to a particular
backend. In GTK this is done by wrapping a gtk.gdk.GC object and
forwarding the appropriate calls to it using a dictionary mapping styles
to gdk constants. In Postscript, all the work is done by the renderer,
mapping line styles to postscript calls.
If it's more appropriate to do the mapping at the renderer level (as in
the postscript backend), you don't need to override any of the GC methods.
If it's more appropriate to wrap an instance (as in the GTK backend) and
do the mapping here, you'll need to override several of the setter
methods.
The base GraphicsContext stores colors as a RGB tuple on the unit
interval, eg, (0.5, 0.0, 1.0). You may need to map this to colors
appropriate for your backend.
"""
pass
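# Editorial sketch (an assumption, not used by this backend): if the mapping
# were instead done by wrapping a native GC object, as the docstring above
# describes for the GTK backend, the setters would forward mapped values.
# The numeric codes below are hypothetical placeholders.
class _ExampleWrappedGC(GraphicsContextBase):
    _capd = {'butt': 0, 'projecting': 1, 'round': 2}  # hypothetical native codes
    def set_capstyle(self, cs):
        GraphicsContextBase.set_capstyle(self, cs)
        self._native_capstyle = self._capd[cs]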
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For image backends - is not required
    For GUI backends - this should be overridden if drawing should be done in
interactive python mode
"""
pass
def show():
"""
For image backends - is not required
For GUI backends - show() is usually the last line of a pylab script and
tells the backend that it is time to draw. In interactive mode, this may
be a do nothing func. See the GTK backend for an example of how to handle
interactive versus batch mode
"""
for manager in Gcf.get_all_fig_managers():
# do something to display the GUI
pass
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
# if a main-level app must be created, this is the usual place to
# do it -- see backend_wx, backend_wxagg and backend_tkagg for
# examples. Not all GUIs require explicit instantiation of a
    # main-level app (e.g. backend_gtk, backend_gtkagg) for pylab
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasEMF(thisFig)
manager = FigureManagerEMF(canvas, num)
return manager
class FigureCanvasEMF(FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
"""
def draw(self):
"""
Draw the figure using the renderer
"""
pass
filetypes = {'emf': 'Enhanced Metafile'}
def print_emf(self, filename, dpi=300, **kwargs):
width, height = self.figure.get_size_inches()
renderer = RendererEMF(filename,width,height,dpi)
self.figure.draw(renderer)
renderer.save()
def get_default_filetype(self):
return 'emf'
class FigureManagerEMF(FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
For non interactive backends, the base class does all the work
"""
pass
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerEMF
| gpl-3.0 |
huzq/scikit-learn | sklearn/impute/tests/test_knn.py | 15 | 17366 | import numpy as np
import pytest
from sklearn import config_context
from sklearn.impute import KNNImputer
from sklearn.metrics.pairwise import nan_euclidean_distances
from sklearn.metrics.pairwise import pairwise_distances
from sklearn.neighbors import KNeighborsRegressor
from sklearn.utils._testing import assert_allclose
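# Editorial sketch (not a test): minimal KNNImputer usage, mirroring the
# scikit-learn docstring example.  Each missing entry is replaced by the mean
# of that feature over the n_neighbors nearest rows, where distances are
# computed with the nan_euclidean metric.
def _example_knn_imputer_usage():
    X = np.array([[1, 2, np.nan], [3, 4, 3], [np.nan, 6, 5], [8, 8, 7]])
    return KNNImputer(n_neighbors=2).fit_transform(X)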
@pytest.mark.parametrize("weights", ["uniform", "distance"])
@pytest.mark.parametrize("n_neighbors", range(1, 6))
def test_knn_imputer_shape(weights, n_neighbors):
# Verify the shapes of the imputed matrix for different weights and
# number of neighbors.
n_rows = 10
n_cols = 2
X = np.random.rand(n_rows, n_cols)
X[0, 0] = np.nan
imputer = KNNImputer(n_neighbors=n_neighbors, weights=weights)
X_imputed = imputer.fit_transform(X)
assert X_imputed.shape == (n_rows, n_cols)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_default_with_invalid_input(na):
# Test imputation with default values and invalid input
# Test with inf present
X = np.array([
[np.inf, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
KNNImputer(missing_values=na).fit(X)
# Test with inf present in matrix passed in transform()
X = np.array([
[np.inf, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
X_fit = np.array([
[0, 1, 1, 2, na],
[2, 1, 2, 2, 3],
[3, 2, 3, 3, 8],
[na, 6, 0, 5, 13],
[na, 7, 0, 7, 8],
[6, 6, 2, 5, 7],
])
imputer = KNNImputer(missing_values=na).fit(X_fit)
with pytest.raises(ValueError, match="Input contains (infinity|NaN)"):
imputer.transform(X)
    # non-positive n_neighbors
with pytest.raises(ValueError, match="Expected n_neighbors > 0"):
KNNImputer(missing_values=na, n_neighbors=0).fit(X_fit)
# Test with missing_values=0 when NaN present
imputer = KNNImputer(missing_values=0, n_neighbors=2, weights="uniform")
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
msg = (r"Input contains NaN, infinity or a value too large for "
r"dtype\('float64'\)")
with pytest.raises(ValueError, match=msg):
imputer.fit(X)
X = np.array([
[0, 0],
[np.nan, 2],
])
# Test with a metric type without NaN support
imputer = KNNImputer(metric="euclidean")
bad_metric_msg = "The selected metric does not support NaN values"
with pytest.raises(ValueError, match=bad_metric_msg):
imputer.fit(X)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_removes_all_na_features(na):
X = np.array([
[1, 1, na, 1, 1, 1.],
[2, 3, na, 2, 2, 2],
[3, 4, na, 3, 3, na],
[6, 4, na, na, 6, 6],
])
knn = KNNImputer(missing_values=na, n_neighbors=2).fit(X)
X_transform = knn.transform(X)
assert not np.isnan(X_transform).any()
assert X_transform.shape == (4, 5)
X_test = np.arange(0, 12).reshape(2, 6)
X_transform = knn.transform(X_test)
assert_allclose(X_test[:, [0, 1, 3, 4, 5]], X_transform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_zero_nan_imputes_the_same(na):
# Test with an imputable matrix and compare with different missing_values
X_zero = np.array([
[1, 0, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 0],
[6, 6, 0, 6, 6],
])
X_nan = np.array([
[1, na, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, na],
[6, 6, na, 6, 6],
])
X_imputed = np.array([
[1, 2.5, 1, 1, 1.],
[2, 2, 2, 2, 2],
[3, 3, 3, 3, 1.5],
[6, 6, 2.5, 6, 6],
])
imputer_zero = KNNImputer(missing_values=0, n_neighbors=2,
weights="uniform")
imputer_nan = KNNImputer(missing_values=na, n_neighbors=2,
weights="uniform")
assert_allclose(imputer_zero.fit_transform(X_zero), X_imputed)
assert_allclose(imputer_zero.fit_transform(X_zero),
imputer_nan.fit_transform(X_nan))
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_verify(na):
# Test with an imputable matrix
X = np.array([
[1, 0, 0, 1],
[2, 1, 2, na],
[3, 2, 3, na],
[na, 4, 5, 5],
[6, na, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
X_imputed = np.array([
[1, 0, 0, 1],
[2, 1, 2, 8],
[3, 2, 3, 8],
[4, 4, 5, 5],
[6, 3, 6, 7],
[8, 8, 8, 8],
[16, 15, 18, 19],
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
# Test when there is not enough neighbors
X = np.array([
[1, 0, 0, na],
[2, 1, 2, na],
[3, 2, 3, na],
[4, 4, 5, na],
[6, 7, 6, na],
[8, 8, 8, na],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
# Not enough neighbors, use column mean from training
X_impute_value = (20 + 22) / 2
X_imputed = np.array([
[1, 0, 0, X_impute_value],
[2, 1, 2, X_impute_value],
[3, 2, 3, X_impute_value],
[4, 4, 5, X_impute_value],
[6, 7, 6, X_impute_value],
[8, 8, 8, X_impute_value],
[20, 20, 20, 20],
[22, 22, 22, 22]
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
# Test when data in fit() and transform() are different
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 16]
])
X1 = np.array([
[1, 0],
[3, 2],
[4, na]
])
X_2_1 = (0 + 3 + 6 + 7 + 8) / 5
X1_imputed = np.array([
[1, 0],
[3, 2],
[4, X_2_1]
])
imputer = KNNImputer(missing_values=na)
assert_allclose(imputer.fit(X).transform(X1), X1_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_one_n_neighbors(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, na],
[7, 7],
[na, 8],
[14, 13]
])
X_imputed = np.array([
[0, 0],
[4, 2],
[4, 3],
[5, 3],
[7, 7],
[7, 8],
[14, 13]
])
imputer = KNNImputer(n_neighbors=1, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_all_samples_are_neighbors(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, na],
[7, 7],
[na, 8],
[14, 13]
])
X_imputed = np.array([
[0, 0],
[6, 2],
[4, 3],
[5, 5.5],
[7, 7],
[6, 8],
[14, 13]
])
n_neighbors = X.shape[0] - 1
imputer = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
n_neighbors = X.shape[0]
imputer_plus1 = KNNImputer(n_neighbors=n_neighbors, missing_values=na)
assert_allclose(imputer_plus1.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_uniform(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "uniform" weight (or unweighted)
X_imputed_uniform = np.array([
[0, 0],
[5, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="uniform", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
# Test with "callable" weight
def no_weight(dist):
return None
imputer = KNNImputer(weights=no_weight, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
# Test with "callable" uniform weight
def uniform_weight(dist):
return np.ones_like(dist)
imputer = KNNImputer(weights=uniform_weight, missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_uniform)
@pytest.mark.parametrize("na", [np.nan, -1])
def test_knn_imputer_weight_distance(na):
X = np.array([
[0, 0],
[na, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# Test with "distance" weight
nn = KNeighborsRegressor(metric="euclidean", weights="distance")
X_rows_idx = [0, 2, 3, 4, 5, 6]
nn.fit(X[X_rows_idx, 1:], X[X_rows_idx, 0])
knn_imputed_value = nn.predict(X[1:2, 1:])[0]
# Manual calculation
X_neighbors_idx = [0, 2, 3, 4, 5]
dist = nan_euclidean_distances(X[1:2, :], X, missing_values=na)
weights = 1 / dist[:, X_neighbors_idx].ravel()
manual_imputed_value = np.average(X[X_neighbors_idx, 0], weights=weights)
X_imputed_distance1 = np.array([
[0, 0],
[manual_imputed_value, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
# NearestNeighbor calculation
X_imputed_distance2 = np.array([
[0, 0],
[knn_imputed_value, 2],
[4, 3],
[5, 6],
[7, 7],
[9, 8],
[11, 10]
])
imputer = KNNImputer(weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed_distance1)
assert_allclose(imputer.fit_transform(X), X_imputed_distance2)
# Test with weights = "distance" and n_neighbors=2
X = np.array([
[na, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
# neighbors are rows 1, 2, the nan_euclidean_distances are:
dist_0_1 = np.sqrt((3/2)*((1 - 0)**2 + (2 - 0)**2))
dist_0_2 = np.sqrt((3/2)*((2 - 0)**2 + (3 - 0)**2))
imputed_value = np.average([2, 3], weights=[1 / dist_0_1, 1 / dist_0_2])
X_imputed = np.array([
[imputed_value, 0, 0],
[2, 1, 2],
[3, 2, 3],
[4, 5, 5],
])
imputer = KNNImputer(n_neighbors=2, weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
# Test with varying missingness patterns
X = np.array([
[1, 0, 0, 1],
[0, na, 1, na],
[1, 1, 1, na],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
# Get weights of donor neighbors
dist = nan_euclidean_distances(X, missing_values=na)
r1c1_nbor_dists = dist[1, [0, 2, 3, 4, 5]]
r1c3_nbor_dists = dist[1, [0, 3, 4, 5, 6]]
r1c1_nbor_wt = 1 / r1c1_nbor_dists
r1c3_nbor_wt = 1 / r1c3_nbor_dists
r2c3_nbor_dists = dist[2, [0, 3, 4, 5, 6]]
r2c3_nbor_wt = 1 / r2c3_nbor_dists
# Collect donor values
col1_donor_values = np.ma.masked_invalid(X[[0, 2, 3, 4, 5], 1]).copy()
col3_donor_values = np.ma.masked_invalid(X[[0, 3, 4, 5, 6], 3]).copy()
# Final imputed values
r1c1_imp = np.ma.average(col1_donor_values, weights=r1c1_nbor_wt)
r1c3_imp = np.ma.average(col3_donor_values, weights=r1c3_nbor_wt)
r2c3_imp = np.ma.average(col3_donor_values, weights=r2c3_nbor_wt)
X_imputed = np.array([
[1, 0, 0, 1],
[0, r1c1_imp, 1, r1c3_imp],
[1, 1, 1, r2c3_imp],
[0, 1, 0, 0],
[0, 0, 0, 0],
[1, 0, 1, 1],
[10, 10, 10, 10],
])
imputer = KNNImputer(weights="distance", missing_values=na)
assert_allclose(imputer.fit_transform(X), X_imputed)
X = np.array([
[0, 0, 0, na],
[1, 1, 1, na],
[2, 2, na, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[na, 7, 7, 7]
])
dist = pairwise_distances(X, metric="nan_euclidean", squared=False,
missing_values=na)
# Calculate weights
r0c3_w = 1.0 / dist[0, 2:-1]
r1c3_w = 1.0 / dist[1, 2:-1]
r2c2_w = 1.0 / dist[2, (0, 1, 3, 4, 5)]
r7c0_w = 1.0 / dist[7, 2:7]
# Calculate weighted averages
r0c3 = np.average(X[2:-1, -1], weights=r0c3_w)
r1c3 = np.average(X[2:-1, -1], weights=r1c3_w)
r2c2 = np.average(X[(0, 1, 3, 4, 5), 2], weights=r2c2_w)
r7c0 = np.average(X[2:7, 0], weights=r7c0_w)
X_imputed = np.array([
[0, 0, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
imputer_comp_wt = KNNImputer(missing_values=na, weights="distance")
assert_allclose(imputer_comp_wt.fit_transform(X), X_imputed)
def test_knn_imputer_callable_metric():
# Define callable metric that returns the l1 norm:
def custom_callable(x, y, missing_values=np.nan, squared=False):
x = np.ma.array(x, mask=np.isnan(x))
y = np.ma.array(y, mask=np.isnan(y))
dist = np.nansum(np.abs(x-y))
return dist
X = np.array([
[4, 3, 3, np.nan],
[6, 9, 6, 9],
[4, 8, 6, 9],
[np.nan, 9, 11, 10.]
])
X_0_3 = (9 + 9) / 2
X_3_0 = (6 + 4) / 2
X_imputed = np.array([
[4, 3, 3, X_0_3],
[6, 9, 6, 9],
[4, 8, 6, 9],
[X_3_0, 9, 11, 10.]
])
imputer = KNNImputer(n_neighbors=2, metric=custom_callable)
assert_allclose(imputer.fit_transform(X), X_imputed)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
# Note that we use working_memory=0 to ensure that chunking is tested, even
# for a small dataset. However, it should raise a UserWarning that we ignore.
@pytest.mark.filterwarnings("ignore:adhere to working_memory")
def test_knn_imputer_with_simple_example(na, working_memory):
X = np.array([
[0, na, 0, na],
[1, 1, 1, na],
[2, 2, na, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[na, 7, 7, 7]
])
r0c1 = np.mean(X[1:6, 1])
r0c3 = np.mean(X[2:-1, -1])
r1c3 = np.mean(X[2:-1, -1])
r2c2 = np.mean(X[[0, 1, 3, 4, 5], 2])
r7c0 = np.mean(X[2:-1, 0])
X_imputed = np.array([
[0, r0c1, 0, r0c3],
[1, 1, 1, r1c3],
[2, 2, r2c2, 2],
[3, 3, 3, 3],
[4, 4, 4, 4],
[5, 5, 5, 5],
[6, 6, 6, 6],
[r7c0, 7, 7, 7]
])
with config_context(working_memory=working_memory):
imputer_comp = KNNImputer(missing_values=na)
assert_allclose(imputer_comp.fit_transform(X), X_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
@pytest.mark.parametrize("weights", ['uniform', 'distance'])
def test_knn_imputer_not_enough_valid_distances(na, weights):
# Samples with needed feature has nan distance
X1 = np.array([
[na, 11],
[na, 1],
[3, na]
])
X1_imputed = np.array([
[3, 11],
[3, 1],
[3, 6]
])
knn = KNNImputer(missing_values=na, n_neighbors=1, weights=weights)
assert_allclose(knn.fit_transform(X1), X1_imputed)
X2 = np.array([[4, na]])
X2_imputed = np.array([[4, 6]])
assert_allclose(knn.transform(X2), X2_imputed)
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_drops_all_nan_features(na):
X1 = np.array([
[na, 1],
[na, 2]
])
knn = KNNImputer(missing_values=na, n_neighbors=1)
X1_expected = np.array([[1], [2]])
assert_allclose(knn.fit_transform(X1), X1_expected)
X2 = np.array([
[1, 2],
[3, na]
])
X2_expected = np.array([[2], [1.5]])
assert_allclose(knn.transform(X2), X2_expected)
@pytest.mark.parametrize("working_memory", [None, 0])
@pytest.mark.parametrize("na", [-1, np.nan])
def test_knn_imputer_distance_weighted_not_enough_neighbors(na,
working_memory):
X = np.array([
[3, na],
[2, na],
[na, 4],
[5, 6],
[6, 8],
[na, 5]
])
dist = pairwise_distances(X, metric="nan_euclidean", squared=False,
missing_values=na)
X_01 = np.average(X[3:5, 1], weights=1/dist[0, 3:5])
X_11 = np.average(X[3:5, 1], weights=1/dist[1, 3:5])
X_20 = np.average(X[3:5, 0], weights=1/dist[2, 3:5])
X_50 = np.average(X[3:5, 0], weights=1/dist[5, 3:5])
X_expected = np.array([
[3, X_01],
[2, X_11],
[X_20, 4],
[5, 6],
[6, 8],
[X_50, 5]
])
with config_context(working_memory=working_memory):
knn_3 = KNNImputer(missing_values=na, n_neighbors=3,
weights='distance')
assert_allclose(knn_3.fit_transform(X), X_expected)
knn_4 = KNNImputer(missing_values=na, n_neighbors=4,
weights='distance')
assert_allclose(knn_4.fit_transform(X), X_expected)
@pytest.mark.parametrize("na, allow_nan", [(-1, False), (np.nan, True)])
def test_knn_tags(na, allow_nan):
knn = KNNImputer(missing_values=na)
assert knn._get_tags()["allow_nan"] == allow_nan
| bsd-3-clause |
hetland/xray | xray/test/test_utils.py | 2 | 4438 | import numpy as np
import pandas as pd
from xray.core import ops, utils
from xray.core.pycompat import OrderedDict
from . import TestCase
class TestSafeCastToIndex(TestCase):
def test(self):
dates = pd.date_range('2000-01-01', periods=10)
x = np.arange(5)
td = x * np.timedelta64(1, 'D')
for expected, array in [
(dates, dates.values),
(pd.Index(x, dtype=object), x.astype(object)),
(pd.Index(td), td),
(pd.Index(td, dtype=object), td.astype(object)),
]:
actual = utils.safe_cast_to_index(array)
self.assertArrayEqual(expected, actual)
self.assertEqual(expected.dtype, actual.dtype)
class TestArrayEquiv(TestCase):
def test_0d(self):
        # verify our workaround for pd.isnull not working for 0-dimensional
# object arrays
self.assertTrue(ops.array_equiv(0, np.array(0, dtype=object)))
self.assertTrue(
ops.array_equiv(np.nan, np.array(np.nan, dtype=object)))
self.assertFalse(
ops.array_equiv(0, np.array(1, dtype=object)))
class TestDictionaries(TestCase):
def setUp(self):
self.x = {'a': 'A', 'b': 'B'}
self.y = {'c': 'C', 'b': 'B'}
self.z = {'a': 'Z'}
def test_equivalent(self):
self.assertTrue(utils.equivalent(0, 0))
self.assertTrue(utils.equivalent(np.nan, np.nan))
self.assertTrue(utils.equivalent(0, np.array(0.0)))
self.assertTrue(utils.equivalent([0], np.array([0])))
self.assertTrue(utils.equivalent(np.array([0]), [0]))
self.assertTrue(utils.equivalent(np.arange(3), 1.0 * np.arange(3)))
self.assertFalse(utils.equivalent(0, np.zeros(3)))
def test_safe(self):
# should not raise exception:
utils.update_safety_check(self.x, self.y)
def test_unsafe(self):
with self.assertRaises(ValueError):
utils.update_safety_check(self.x, self.z)
def test_ordered_dict_intersection(self):
self.assertEqual({'b': 'B'},
utils.ordered_dict_intersection(self.x, self.y))
self.assertEqual({}, utils.ordered_dict_intersection(self.x, self.z))
def test_dict_equiv(self):
x = OrderedDict()
x['a'] = 3
x['b'] = np.array([1, 2, 3])
y = OrderedDict()
y['b'] = np.array([1.0, 2.0, 3.0])
y['a'] = 3
self.assertTrue(utils.dict_equiv(x, y)) # two nparrays are equal
y['b'] = [1, 2, 3] # np.array not the same as a list
self.assertTrue(utils.dict_equiv(x, y)) # nparray == list
x['b'] = [1.0, 2.0, 3.0]
self.assertTrue(utils.dict_equiv(x, y)) # list vs. list
x['c'] = None
self.assertFalse(utils.dict_equiv(x, y)) # new key in x
x['c'] = np.nan
y['c'] = np.nan
self.assertTrue(utils.dict_equiv(x, y)) # as intended, nan is nan
x['c'] = np.inf
y['c'] = np.inf
self.assertTrue(utils.dict_equiv(x, y)) # inf == inf
y = dict(y)
self.assertTrue(utils.dict_equiv(x, y)) # different dictionary types are fine
y['b'] = 3 * np.arange(3)
self.assertFalse(utils.dict_equiv(x, y)) # not equal when arrays differ
def test_frozen(self):
x = utils.Frozen(self.x)
with self.assertRaises(TypeError):
x['foo'] = 'bar'
with self.assertRaises(TypeError):
del x['a']
with self.assertRaises(AttributeError):
x.update(self.y)
self.assertEqual(x.mapping, self.x)
self.assertIn(repr(x), ("Frozen({'a': 'A', 'b': 'B'})",
"Frozen({'b': 'B', 'a': 'A'})"))
def test_sorted_keys_dict(self):
x = {'a': 1, 'b': 2, 'c': 3}
y = utils.SortedKeysDict(x)
self.assertItemsEqual(y, ['a', 'b', 'c'])
self.assertEqual(repr(utils.SortedKeysDict()),
"SortedKeysDict({})")
def test_chain_map(self):
m = utils.ChainMap({'x': 0, 'y': 1}, {'x': -100, 'z': 2})
self.assertIn('x', m)
self.assertIn('y', m)
self.assertIn('z', m)
self.assertEqual(m['x'], 0)
self.assertEqual(m['y'], 1)
self.assertEqual(m['z'], 2)
m['x'] = 100
self.assertEqual(m['x'], 100)
self.assertEqual(m.maps[0]['x'], 100)
self.assertItemsEqual(['x', 'y', 'z'], m)
| apache-2.0 |
DreamLiMu/ML_Python | les5/logRegres.py | 1 | 4070 | #-*-coding:utf-8-*-
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
from numpy import *
def loadDataSet():
dataMat = []; labelMat = []
fr = open('testSet.txt')
for line in fr.readlines():
lineArr = line.strip().split()
dataMat.append([1.0,float(lineArr[0]),float(lineArr[1])])
labelMat.append(int(lineArr[2]))
return dataMat,labelMat
def sigmoid(inX):
return 1.0/(1+exp(-inX))
def gradAscent(dataMatIn,classLabels):
dataMatrix = mat(dataMatIn)
labelMat = mat(classLabels).transpose()
m,n = shape(dataMatrix)
alpha = 0.001
maxCycles = 500
weights = ones((n,1))
for k in range(maxCycles):
        ## matrix multiplication: h is the column vector of predictions
h = sigmoid(dataMatrix*weights)
error = (labelMat - h)
weights = weights + alpha * dataMatrix.transpose() * error
return weights
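## Editorial note: for the logistic log-likelihood the gradient with respect to
## the weights is X^T * (labels - sigmoid(X*w)), so the batch update above is
## simply w <- w + alpha * X^T * error.
## Usage sketch (assumes testSet.txt is present; gradAscent returns a numpy
## matrix, so convert it with getA() and see the commented line inside
## plotBestFit below):
##     dataArr, labelMat = loadDataSet()
##     w = gradAscent(dataArr, labelMat)
##     plotBestFit(w.getA())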
def plotBestFit(wei):
import matplotlib.pyplot as plt
    ##weights = wei.getA() ## use this line when the weights come from gradAscent (numpy matrix)
    weights = wei ## use this line when the weights come from stocGradAscent0 (numpy array)
dataMat,labelMat = loadDataSet()
dataArr = array(dataMat)
n = shape(dataArr)[0]
xcord1 = []; ycord1 = []
xcord2 = []; ycord2 = []
for i in range(n):
if int(labelMat[i]) == 1:
xcord1.append(dataArr[i,1]);ycord1.append(dataArr[i,2])
else:
xcord2.append(dataArr[i,1]);ycord2.append(dataArr[i,2])
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(xcord1,ycord1,s=30,c='red',marker='s')
ax.scatter(xcord2,ycord2,s=30,c='green')
x = arange(-3.0,3.0,0.1)
    ## best-fit line: solve w0 + w1*x + w2*y = 0, the sigmoid(0) = 0.5 decision boundary
y = (-weights[0] - weights[1] * x) / weights[2]
ax.plot(x, y)
plt.xlabel('X1');plt.ylabel('X2')
plt.show()
def stocGradAscent0(dataMatrix,classLabels):
m,n = shape(dataMatrix)
alpha = 0.01
weights = ones(n)
for i in range(m):
h = sigmoid(dataMatrix[i]*weights)
error = classLabels[i] - h
weights = weights + alpha * error * dataMatrix[i]
return weights
def stocGradAscent1(dataMatrix, classLabels, numIter=150):
m,n = shape(dataMatrix)
weights = ones(n)
for j in range(numIter):
dataIndex = range(m)
for i in range(m):
            ## alpha is adjusted on every iteration; it decreases but never reaches 0
alpha = 4/(1.0+j+i)+0.01
            ## pick a random sample for the update to reduce periodic fluctuations
randIndex = int(random.uniform(0,len(dataIndex)))
h = sigmoid(sum(dataMatrix[randIndex]*weights))
error = classLabels[randIndex] - h
weights = weights + alpha *error*dataMatrix[randIndex]
del(dataIndex[randIndex])
return weights
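## Editorial note: stocGradAscent0 makes a single pass with a fixed alpha,
## while stocGradAscent1 decays alpha over the iterations and picks samples in
## random order, which damps the oscillation of the weights and usually
## converges in fewer passes over the data.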
def classifyVector(inX, weights):
prob = sigmoid(sum(inX*weights))
if prob > 0.5: return 1.0
else: return 0.0
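## colicTest(): trains on horseColicTraining.txt with stocGradAscent1 (500
## iterations, 21 features) and reports the misclassification rate on
## horseColicTest.txt; multiTest() averages this error rate over 10 runs.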
def colicTest():
frTrain = open('horseColicTraining.txt')
frTest = open('horseColicTest.txt')
trainingSet = []; trainingLabels = []
for line in frTrain.readlines():
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
trainingSet.append(lineArr)
trainingLabels.append(float(currLine[21]))
trainWeights = stocGradAscent1(array(trainingSet),trainingLabels,500)
errorCount = 0; numTestVec = 0.0
for line in frTest.readlines():
numTestVec += 1.0
currLine = line.strip().split('\t')
lineArr = []
for i in range(21):
lineArr.append(float(currLine[i]))
if int(classifyVector(array(lineArr),trainWeights)) != int(currLine[21]):
errorCount += 1
errorRate = (float(errorCount)/numTestVec)
print "the error rate of this test is : %f"%errorRate
return errorRate
def multiTest():
numTests = 10; errorSum = 0.0
for k in range(numTests):
errorSum += colicTest()
print "after %d iterations the average error rate is :%f" % (numTests, errorSum/float(numTests))
if __name__ == '__main__':
dataMat,labelMat = loadDataSet()
#weights = stocGradAscent1(array(dataMat), labelMat, 500)
#plotBestFit(weights)
multiTest()
| gpl-2.0 |
kylerbrown/scikit-learn | examples/svm/plot_weighted_samples.py | 188 | 1943 | """
=====================
SVM: Weighted samples
=====================
Plot decision function of a weighted dataset, where the size of points
is proportional to its weight.
The sample weighting rescales the C parameter, which means that the classifier
puts more emphasis on getting these points right. The effect might often be
subtle.
To emphasize the effect here, we particularly weight outliers, making the
deformation of the decision boundary very visible.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
def plot_decision_function(classifier, sample_weight, axis, title):
# plot the decision function
xx, yy = np.meshgrid(np.linspace(-4, 5, 500), np.linspace(-4, 5, 500))
Z = classifier.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# plot the line, the points, and the nearest vectors to the plane
axis.contourf(xx, yy, Z, alpha=0.75, cmap=plt.cm.bone)
axis.scatter(X[:, 0], X[:, 1], c=Y, s=100 * sample_weight, alpha=0.9,
cmap=plt.cm.bone)
axis.axis('off')
axis.set_title(title)
# we create 20 points
np.random.seed(0)
X = np.r_[np.random.randn(10, 2) + [1, 1], np.random.randn(10, 2)]
Y = [1] * 10 + [-1] * 10
sample_weight_last_ten = abs(np.random.randn(len(X)))
sample_weight_constant = np.ones(len(X))
# and bigger weights to some outliers
sample_weight_last_ten[15:] *= 5
sample_weight_last_ten[9] *= 15
# for reference, first fit without class weights
# fit the model
clf_weights = svm.SVC()
clf_weights.fit(X, Y, sample_weight=sample_weight_last_ten)
clf_no_weights = svm.SVC()
clf_no_weights.fit(X, Y)
fig, axes = plt.subplots(1, 2, figsize=(14, 6))
plot_decision_function(clf_no_weights, sample_weight_constant, axes[0],
"Constant weights")
plot_decision_function(clf_weights, sample_weight_last_ten, axes[1],
"Modified weights")
plt.show()
| bsd-3-clause |
rupakc/Kaggle-Compendium | Homesite Quote Conversion/home-baseline.py | 1 | 3586 | import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import MultinomialNB
from sklearn import metrics
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
from sklearn.model_selection import train_test_split
import numpy
def get_naive_bayes_models():
gnb = GaussianNB()
mnb = MultinomialNB()
bnb = BernoulliNB()
classifier_list = [gnb,mnb,bnb]
classifier_name_list = ['Gaussian NB','Multinomial NB','Bernoulli NB']
return classifier_list,classifier_name_list
def get_neural_network(hidden_layer_size=50):
mlp = MLPClassifier(hidden_layer_sizes=hidden_layer_size)
return [mlp], ['MultiLayer Perceptron']
def get_ensemble_models():
rf = RandomForestClassifier(n_estimators=51,min_samples_leaf=5,min_samples_split=3)
bagg = BaggingClassifier(n_estimators=71,random_state=42)
extra = ExtraTreesClassifier(n_estimators=57,random_state=42)
ada = AdaBoostClassifier(n_estimators=51,random_state=42)
grad = GradientBoostingClassifier(n_estimators=101,random_state=42)
classifier_list = [rf,bagg,extra,ada,grad]
classifier_name_list = ['Random Forests','Bagging','Extra Trees','AdaBoost','Gradient Boost']
return classifier_list,classifier_name_list
def label_encode_frame(dataframe):
columns = dataframe.columns
encoder = LabelEncoder()
for column in columns:
if type(dataframe[column][0]) is str:
dataframe[column] = encoder.fit_transform(dataframe[column].values)
return dataframe
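# A rough illustrative sketch of label_encode_frame on a toy frame (the toy
# column names below are made up, not from the Homesite data): string columns
# are replaced by integer codes, numeric columns are left untouched.
def _demo_label_encode():
    toy_frame = pd.DataFrame({'state': ['NJ', 'TX', 'NJ'], 'premium': [10.0, 12.5, 9.9]})
    return label_encode_frame(toy_frame)  # 'state' becomes integer codes such as [0, 1, 0]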
def spilt_date(list_of_date_string,separator='-',format='yyyy-mm-dd'):
month_list = list([])
day_list = list([])
year_list = list([])
for date_string in list_of_date_string:
date_list = date_string.strip().split(separator)
month_list.append(date_list[1])
day_list.append(date_list[2])
year_list.append(date_list[0])
return month_list,day_list,year_list
def print_evaluation_metrics(trained_model,trained_model_name,X_test,y_test):
print '--------- For Model : ', trained_model_name
predicted_values = trained_model.predict(X_test)
print metrics.classification_report(y_test,predicted_values)
print "Accuracy Score : ",metrics.accuracy_score(y_test,predicted_values)
print "---------------------------------------\n"
filename = 'train.csv'
home_frame = pd.read_csv(filename)
class_labels = list(home_frame['QuoteConversion_Flag'].values)
del home_frame['QuoteConversion_Flag']
del home_frame['QuoteNumber']
month_list, day_list, year_list = spilt_date(list(home_frame['Original_Quote_Date'].values))
home_frame['Month'] = month_list
home_frame['Day'] = day_list
home_frame['Year'] = year_list
del home_frame['Original_Quote_Date']
label_encoded_frame = label_encode_frame(home_frame)
imputed_features = Imputer().fit_transform(label_encoded_frame.values)
X_train,X_test,y_train,y_test = train_test_split(imputed_features,class_labels,test_size=0.2,random_state=42)
classifier_list,classifier_name_list = get_ensemble_models()
for classifier,classifier_name in zip(classifier_list,classifier_name_list):
classifier.fit(X_train,y_train)
print_evaluation_metrics(classifier,classifier_name,X_test,y_test) | mit |
GoogleCloudPlatform/mlops-on-gcp | workshops/kfp-caip-sklearn/lab-02-kfp-pipeline/pipeline/covertype_training_pipeline.py | 3 | 7714 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""KFP pipeline orchestrating BigQuery and Cloud AI Platform services."""
import os
from helper_components import evaluate_model
from helper_components import retrieve_best_run
from jinja2 import Template
import kfp
from kfp.components import func_to_container_op
from kfp.dsl.types import Dict
from kfp.dsl.types import GCPProjectID
from kfp.dsl.types import GCPRegion
from kfp.dsl.types import GCSPath
from kfp.dsl.types import String
from kfp.gcp import use_gcp_secret
# Defaults and environment settings
BASE_IMAGE = os.getenv('BASE_IMAGE')
TRAINER_IMAGE = os.getenv('TRAINER_IMAGE')
RUNTIME_VERSION = os.getenv('RUNTIME_VERSION')
PYTHON_VERSION = os.getenv('PYTHON_VERSION')
COMPONENT_URL_SEARCH_PREFIX = os.getenv('COMPONENT_URL_SEARCH_PREFIX')
USE_KFP_SA = os.getenv('USE_KFP_SA')
TRAINING_FILE_PATH = 'datasets/training/data.csv'
VALIDATION_FILE_PATH = 'datasets/validation/data.csv'
TESTING_FILE_PATH = 'datasets/testing/data.csv'
# Parameter defaults
SPLITS_DATASET_ID = 'splits'
HYPERTUNE_SETTINGS = """
{
"hyperparameters": {
"goal": "MAXIMIZE",
"maxTrials": 6,
"maxParallelTrials": 3,
"hyperparameterMetricTag": "accuracy",
"enableTrialEarlyStopping": True,
"params": [
{
"parameterName": "max_iter",
"type": "DISCRETE",
"discreteValues": [500, 1000]
},
{
"parameterName": "alpha",
"type": "DOUBLE",
"minValue": 0.0001,
"maxValue": 0.001,
"scaleType": "UNIT_LINEAR_SCALE"
}
]
}
}
"""
# Helper functions
def generate_sampling_query(source_table_name, num_lots, lots):
"""Prepares the data sampling query."""
sampling_query_template = """
SELECT *
FROM
`{{ source_table }}` AS cover
WHERE
MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), {{ num_lots }}) IN ({{ lots }})
"""
query = Template(sampling_query_template).render(
source_table=source_table_name, num_lots=num_lots, lots=str(lots)[1:-1])
return query
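# Illustrative rendering only (the table name below is hypothetical): for
# generate_sampling_query('my-project.covertype.covertype', num_lots=10, lots=[1, 2, 3, 4])
# the template above roughly produces
#   SELECT * FROM `my-project.covertype.covertype` AS cover
#   WHERE MOD(ABS(FARM_FINGERPRINT(TO_JSON_STRING(cover))), 10) IN (1, 2, 3, 4)
# i.e. a deterministic, repeatable ~40% sample keyed on the row hash.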
# Create component factories
component_store = kfp.components.ComponentStore(
local_search_paths=None, url_search_prefixes=[COMPONENT_URL_SEARCH_PREFIX])
bigquery_query_op = component_store.load_component('bigquery/query')
mlengine_train_op = component_store.load_component('ml_engine/train')
mlengine_deploy_op = component_store.load_component('ml_engine/deploy')
retrieve_best_run_op = func_to_container_op(
retrieve_best_run, base_image=BASE_IMAGE)
evaluate_model_op = func_to_container_op(evaluate_model, base_image=BASE_IMAGE)
@kfp.dsl.pipeline(
name='Covertype Classifier Training',
description='The pipeline training and deploying the Covertype classifier'
)
def covertype_train(project_id,
region,
source_table_name,
gcs_root,
dataset_id,
evaluation_metric_name,
evaluation_metric_threshold,
model_id,
version_id,
replace_existing_version,
hypertune_settings=HYPERTUNE_SETTINGS,
dataset_location='US'):
"""Orchestrates training and deployment of an sklearn model."""
# Create the training split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[1, 2, 3, 4])
training_file_path = '{}/{}'.format(gcs_root, TRAINING_FILE_PATH)
create_training_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=training_file_path,
dataset_location=dataset_location)
# Create the validation split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[8])
validation_file_path = '{}/{}'.format(gcs_root, VALIDATION_FILE_PATH)
create_validation_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=validation_file_path,
dataset_location=dataset_location)
# Create the testing split
query = generate_sampling_query(
source_table_name=source_table_name, num_lots=10, lots=[9])
testing_file_path = '{}/{}'.format(gcs_root, TESTING_FILE_PATH)
create_testing_split = bigquery_query_op(
query=query,
project_id=project_id,
dataset_id=dataset_id,
table_id='',
output_gcs_path=testing_file_path,
dataset_location=dataset_location)
# Tune hyperparameters
tune_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--hptune', 'True'
]
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir/hypertune',
kfp.dsl.RUN_ID_PLACEHOLDER)
hypertune = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=tune_args,
training_input=hypertune_settings)
# Retrieve the best trial
get_best_trial = retrieve_best_run_op(
project_id, hypertune.outputs['job_id'])
# Train the model on a combined training and validation datasets
job_dir = '{}/{}/{}'.format(gcs_root, 'jobdir', kfp.dsl.RUN_ID_PLACEHOLDER)
train_args = [
'--training_dataset_path',
create_training_split.outputs['output_gcs_path'],
'--validation_dataset_path',
create_validation_split.outputs['output_gcs_path'], '--alpha',
get_best_trial.outputs['alpha'], '--max_iter',
get_best_trial.outputs['max_iter'], '--hptune', 'False'
]
train_model = mlengine_train_op(
project_id=project_id,
region=region,
master_image_uri=TRAINER_IMAGE,
job_dir=job_dir,
args=train_args)
# Evaluate the model on the testing split
eval_model = evaluate_model_op(
dataset_path=str(create_testing_split.outputs['output_gcs_path']),
model_path=str(train_model.outputs['job_dir']),
metric_name=evaluation_metric_name)
# Deploy the model if the primary metric is better than threshold
with kfp.dsl.Condition(eval_model.outputs['metric_value'] > evaluation_metric_threshold):
deploy_model = mlengine_deploy_op(
model_uri=train_model.outputs['job_dir'],
project_id=project_id,
model_id=model_id,
version_id=version_id,
runtime_version=RUNTIME_VERSION,
python_version=PYTHON_VERSION,
replace_existing_version=replace_existing_version)
# Configure the pipeline to run using the service account defined
# in the user-gcp-sa k8s secret
if USE_KFP_SA == 'True':
kfp.dsl.get_pipeline_conf().add_op_transformer(
use_gcp_secret('user-gcp-sa'))
| apache-2.0 |
Jordan-Zhu/RoboVision | unsorted/algo-NDT192139AAAD.py | 1 | 6993 | import cv2
import numpy as np
from matplotlib import pyplot as plt
from skimage import morphology
from drawlinefeature import DrawLineFeature,drawconvex#zc
from lineseg import lineseg
from drawedgelist import drawedgelist
from Lseg_to_Lfeat_v2 import Lseg_to_Lfeat_v2 #zc
from LabelLineCurveFeature_v2 import LabelLineCurveFeature_v2 #zc
from merge_lines_v2 import merge_lines# zc
from LabelLineCurveFeature import classify_curves
from zeroElimMedianHoleFill import zeroElimMedianHoleFill
def auto_canny(image, sigma=0.33):
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv2.Canny(image, lower, upper)
# return the edged image
return edged
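# Worked example of the automatic thresholds (illustrative numbers only): for a
# frame whose median intensity is v = 100 and the default sigma = 0.33, the
# hysteresis bounds become lower = int(0.67 * 100) = 67 and
# upper = int(1.33 * 100) = 133, so both thresholds track overall scene brightness.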
def showimg(img, im_name='image', type='cv', write=False, imagename='img.png'):
if type == 'plt':
plt.figure()
plt.imshow(img, im_name, interpolation='nearest', aspect='auto')
# plt.imshow(img, 'gray', interpolation='none')
plt.title(im_name)
plt.show()
if write:
plt.savefig(imagename, bbox_inches='tight')
elif type == 'cv':
cv2.imshow(im_name, img)
cv2.waitKey(0)
cv2.destroyAllWindows()
if write:
cv2.imwrite("../../images/%s", imagename, img)
def create_img(mat):
blank_image = np.zeros((mat.shape[0], mat.shape[1], 3), np.uint8)
# print(blank_image.shape)
mask = np.array(mat * 255, dtype=np.uint8)
masked = np.ma.masked_where(mask <= 0, mask)
# plt.figure()
# plt.imshow(blank_image, 'gray', interpolation='none')
# plt.imshow(masked, 'gray_r', interpolation='none', alpha=1.0)
# plt.title('canny + morphology')
# plt.savefig('foo.png', bbox_inches='tight')
# plt.show()
return mask
def grad_dir(img):
# compute x and y derivatives
# OpenCV's Sobel operator gives better results than numpy gradient
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=-1)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=-1)
# calculate gradient direction angles
# phase needs 64-bit input
angle = cv2.phase(sobelx, sobely)
# truncates number
gradir = np.fix(180 + angle)
return gradir
# Contrast Limited Adaptive Histogram Equalization
# Improves the contrast of our image.
def clahe(img, iter=1):
for i in range(0, iter):
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
img = clahe.apply(img)
return img
def normalize_depth(depthimg, colormap=False):
# Normalize depth image
min, max, minloc, maxloc = cv2.minMaxLoc(depthimg)
adjmap = np.zeros_like(depthimg)
dst = cv2.convertScaleAbs(depthimg, adjmap, 255 / (max - min), -min)
if colormap == True:
return cv2.applyColorMap(dst, cv2.COLORMAP_JET)
else:
return dst
def morpho(img):
# kernel for dilation
kernel = np.ones((7, 7), np.uint8)
dilation = cv2.dilate(img, kernel, iterations=1)
skel = morphology.skeletonize(dilation > 0)
return skel
def edge_detect(depth):
# Gradient of depth img
graddir = grad_dir(depth)
# Threshold image to get it in the RGB color space
dimg1 = (((graddir - graddir.min()) / (graddir.max() - graddir.min())) * 255.9).astype(np.uint8)
# Eliminate salt-and-pepper noise
median = cv2.medianBlur(dimg1, 9)
# Further remove noise while keeping edges sharp
blur = cv2.bilateralFilter(median, 9, 25, 25)
dimg1 = auto_canny(blur)
skel1 = morpho(dimg1)
showimg(create_img(skel1))
# cnt1 = find_contours(create_img(skel1))
# Depth discontinuity
depthimg = normalize_depth(depth)
dimg2 = clahe(depthimg, iter=2)
showimg(dimg2)
dimg2 = auto_canny(dimg2)
skel2 = morpho(dimg2)
# cnt2 = find_contours(create_img(skel2))
# combine both images
dst = (np.logical_or(skel1, skel2)).astype('uint8')
dst = create_img(dst)
return dst
def find_contours(im, mode=cv2.RETR_CCOMP):
# im = cv2.imread('circle.png')
# error: (-215) scn == 3 || scn == 4 in function cv::ipp_cvtColor
# imgray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)
# ret, thresh = cv2.threshold(imgray, 127, 255, 0)
if mode == cv2.RETR_CCOMP:
im2, contours, hierarchy = cv2.findContours(im, mode, cv2.CHAIN_APPROX_SIMPLE)
newcontours = []
for i in range(len(contours)):
if hierarchy[0][i, 2] < 0:
newcontours.append(contours[i])
cv2.drawContours(im, newcontours, 2, (0, 255, 0), 1)
return newcontours
else:
im2, contours, hierarchy = cv2.findContours(im, mode, cv2.CHAIN_APPROX_SIMPLE)
# cv2.RETR_EXTERNAL cv2.RETR_CCOMP
# show contours
# cv2.drawContours(im, contours, -1, (0, 255, 0), 2)
#
# # Display the image.
# cv2.imshow("contours", im)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
return contours
if __name__ == '__main__':
# second argument is a flag which specifies the way
# an image should be read. -1 loads image unchanged with alpha channel
depthimg = cv2.imread('img/learn15.png', -1)
colorimg = cv2.imread('img/clearn17.png', 0)
showimg(normalize_depth(depthimg, colormap=True), 'depth')
id = depthimg[100:, 100:480] ## zc crop the region of interest
siz = id.shape ## image size of the region of interest
thresh_m = 10
label_thresh = 11
# edges = edge_detect(depthimg, colorimg)
edges = edge_detect(id) # zc
showimg(edges)
# showimg(cntr1)
# showimg(cntr2)
cntrs = np.asarray(find_contours(edges))
seglist = lineseg(cntrs, tol=2)
drawedgelist(seglist, rowscols=[])
# Get line features for later processing
[linefeature, listpoint] = Lseg_to_Lfeat_v2(seglist, cntrs, siz)
[line_new, listpoint_new, line_merged] = merge_lines(linefeature, listpoint, thresh_m, siz)
# line_new = LabelLineCurveFeature_v2(depthimg, line_new, listpoint_new, label_thresh)
line_new = classify_curves(depthimg, line_new, listpoint_new, label_thresh)
# line_new = LabelLineCurveFeature_v2(depthimg, line_new, listpoint_new, label_thresh)
DrawLineFeature(linefeature,siz,'line features')
drawconvex(line_new, siz, 'convex')
# TO-DO
# - Check LabelLineCurveFeature_v2 with python input
# - Write a function to make a window mask
# - (Section 4) Take non-zero curve features and sort by angle (index 7 in MATLAB)
# Long-term, to make the program easier to read and use
# *- Create a Line object with properties: start, end, object/background, curvature/discontinuity
# distance from another line, check if line is overlapping
| gpl-3.0 |
miloharper/neural-network-animation | neural_network.py | 4 | 4978 | import matplotlib
matplotlib.use("Agg")
from matplotlib import pyplot
from math import fabs
from formulae import sigmoid, sigmoid_derivative, random_weight, get_synapse_colour, adjust_line_to_perimeter_of_circle, layer_left_margin
import parameters
class Synapse():
def __init__(self, input_neuron_index, x1, x2, y1, y2):
self.input_neuron_index = input_neuron_index
self.weight = random_weight()
self.signal = 0
x1, x2, y1, y2 = adjust_line_to_perimeter_of_circle(x1, x2, y1, y2)
self.x1 = x1
self.x2 = x2
self.y1 = y1
self.y2 = y2
def draw(self):
line = pyplot.Line2D((self.x1, self.x2), (self.y1, self.y2), lw=fabs(self.weight), color=get_synapse_colour(self.weight), zorder=1)
outer_glow = pyplot.Line2D((self.x1, self.x2), (self.y1, self.y2), lw=(fabs(self.weight) * 2), color=get_synapse_colour(self.weight), zorder=2, alpha=self.signal * 0.4)
pyplot.gca().add_line(line)
pyplot.gca().add_line(outer_glow)
class Neuron():
def __init__(self, x, y, previous_layer):
self.x = x
self.y = y
self.output = 0
self.synapses = []
self.error = 0
index = 0
if previous_layer:
for input_neuron in previous_layer.neurons:
synapse = Synapse(index, x, input_neuron.x, y, input_neuron.y)
self.synapses.append(synapse)
index += 1
def train(self, previous_layer):
for synapse in self.synapses:
# Propagate the error back down the synapse to the neuron in the layer below
previous_layer.neurons[synapse.input_neuron_index].error += self.error * sigmoid_derivative(self.output) * synapse.weight
# Adjust the synapse weight
synapse.weight += synapse.signal * self.error * sigmoid_derivative(self.output)
return previous_layer
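    # The update above is the standard delta rule. As a rough worked example with
    # illustrative numbers (and assuming sigmoid_derivative(o) = o * (1 - o) in
    # formulae.py, the usual form when o is already the sigmoid output): for
    # signal = 1.0, error = 0.5 and output = 0.8, the derivative is 0.8 * 0.2 = 0.16,
    # so the synapse weight changes by 1.0 * 0.5 * 0.16 = 0.08.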
def think(self, previous_layer):
activity = 0
for synapse in self.synapses:
synapse.signal = previous_layer.neurons[synapse.input_neuron_index].output
activity += synapse.weight * synapse.signal
self.output = sigmoid(activity)
def draw(self):
circle = pyplot.Circle((self.x, self.y), radius=parameters.neuron_radius, fill=True, color=(0.2, 0.2, 0), zorder=3)
outer_glow = pyplot.Circle((self.x, self.y), radius=parameters.neuron_radius * 1.5, fill=True, color=(self.output, self.output, 0), zorder=4, alpha=self.output * 0.5)
pyplot.gca().add_patch(circle)
pyplot.gca().add_patch(outer_glow)
pyplot.text(self.x + 0.8, self.y, round(self.output, 2))
for synapse in self.synapses:
synapse.draw()
class Layer():
def __init__(self, network, number_of_neurons):
if len(network.layers) > 0:
self.is_input_layer = False
self.previous_layer = network.layers[-1]
self.y = self.previous_layer.y + parameters.vertical_distance_between_layers
else:
self.is_input_layer = True
self.previous_layer = None
self.y = parameters.bottom_margin
self.neurons = []
x = layer_left_margin(number_of_neurons)
for iteration in xrange(number_of_neurons):
neuron = Neuron(x, self.y, self.previous_layer)
self.neurons.append(neuron)
x += parameters.horizontal_distance_between_neurons
def think(self):
for neuron in self.neurons:
neuron.think(self.previous_layer)
def draw(self):
for neuron in self.neurons:
neuron.draw()
class NeuralNetwork():
def __init__(self, requested_layers):
self.layers = []
for number_of_neurons in requested_layers:
self.layers.append(Layer(self, number_of_neurons))
def train(self, example):
error = example.output - self.think(example.inputs)
self.reset_errors()
self.layers[-1].neurons[0].error = error
for l in range(len(self.layers) - 1, 0, -1):
for neuron in self.layers[l].neurons:
self.layers[l - 1] = neuron.train(self.layers[l - 1])
return fabs(error)
def do_not_think(self):
for layer in self.layers:
for neuron in layer.neurons:
neuron.output = 0
for synapse in neuron.synapses:
synapse.signal = 0
def think(self, inputs):
for layer in self.layers:
if layer.is_input_layer:
for index, value in enumerate(inputs):
self.layers[0].neurons[index].output = value
else:
layer.think()
return self.layers[-1].neurons[0].output
def draw(self):
pyplot.cla()
for layer in self.layers:
layer.draw()
def reset_errors(self):
for layer in self.layers:
for neuron in layer.neurons:
neuron.error = 0 | mit |
maxlit/pyEdgeworthBox | pyEdgeworthBox/pyEdgeworthBox.py | 1 | 10602 | import numpy as np
import matplotlib.pyplot as plt
from math import sqrt, copysign
from scipy.optimize import brenth
from scipy.optimize import fsolve,fmin_l_bfgs_b,fmin_cg,fminbound
"""
sign of the number
"""
def sign(x):
if x==0:
return 0
else:
return copysign(1,x)
"""
if function f can't be computed, return None
"""
def f_None(f,x):
try:
return f(x)
except:
return None
"""
if the bound was touched, return None
L is the level of the function f
"""
def correct(x,y,f,L):
eps=10e-5
if abs(f(x,y)-L)>eps:
return None
else:
return y
"""
if output can't be produced, return 0, if there's division by zero, then it looks for the limit and returns it
"""
def _(f,*x):
try:
out=f(*x)
if out is None:
return float("inf")
else:
return out
except ZeroDivisionError:
l=len(x)
eps=abs(f(*[1e-02]*l)-f(*[1e-04]*l))
if abs(f(*[1e-04]*l)-f(*[1e-06]*l))<eps and abs(f(*[1e-06]*l)-f(*[1e-08]*l))<eps:
return f(*[1e-10]*l)
else:
return sign(f(*[1e-10]*l))*float("inf")
"""
produces the array of the first items of the element of the array
"""
def fst(X):
return list(map(lambda x: x[0],X))
"""
produces the array of the second items of the element of the array
"""
def snd(X):
return list(map(lambda x: x[1],X))
"""
unpacks [(X_1,Y_1),...,(X_k,Y_k),...,(X_n,Y_n)] into [(X_1,...,X_k,...,X_n),(Y_1,...,Y_k,...,Y_n)]
"""
def unpack(X):
return [fst(X),snd(X)]
"""
find the root of the function; if the function has the same sign at both ends of the interval, shrink the interval and try again
"""
def rootalt(f,a,b):
eps=(b-a)/64.0
turn=0
N_iter=10
while abs(a-b)>eps and N_iter > 0:
N_iter-=1
try:
#return fmin_cg(f,(a+b)/2.0)[0]
return brenth(f,a,b)
except ValueError:
if turn==0:
a=a+eps
turn=1
else:
b=b+eps
turn=0
#return root2(f,a,b)
return None
def root(f,a,b):
a_init=a
b_init=b
eps=(b-a)/16.0
turn=0
N_iter=12
while abs(a-b)>eps and N_iter > 0 and f(a)*f(b)>0:
N_iter-=1
if turn==0:
a=a+eps
turn=1
else:
b=b-eps
turn=0
try:
return brenth(f,a,b)
except ValueError:
return fminbound(f,a_init,b_init)
def root2(f,a,b):
return fmin_cg(f,(a+b)/2.0,disp=False)[0]
def root3(f,a,b):
return fmin_l_bfgs_b(func=f,x0=(a+b)/2,bounds=[a,b])
"""
2-point numerical derivative
"""
def prime(f,dt=10e-3):
return lambda x: (f(x+dt)-f(x-dt))/(2*dt)
"""
Marginal rate of substitution of a utility function u(.)
"""
def MRS(u):
u_x=lambda x,y: prime(lambda z: u(z,y))(x)
u_y=lambda x,y: prime(lambda z: u(x,z))(y)
return lambda x,y: u_x(x,y)/u_y(x,y)
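"""
Rough numerical sanity check for MRS (illustrative, not used by EdgeBox): for a
Cobb-Douglas utility u(x,y) = sqrt(x*y) the analytic MRS is y/x, so the finite
differences above should give approximately 2.0 at the bundle (2.0, 4.0).
"""
def _check_MRS_cobb_douglas(x=2.0, y=4.0):
    mrs_numeric = MRS(lambda a, b: sqrt(a * b))(x, y)
    return mrs_numeric, y / x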
"""
Edgeworth Box parameters that determine what to show on the plot
"""
class EdgeBoxParameter:
#def __init__(self,pareto,core,U1,U2,endow,walras,budget,N):
#boll_array=[pareto,core,U1,U2,endow,walras,budget]
def __init__(self,N,pareto=True,core=True,eq=True,budget=True):
self.N=N
self.pareto=pareto
self.core=core
self.eq=eq
self.budget=budget
defaultEBP=EdgeBoxParameter(100)
class EdgeBox():
def __init__(self,u1,u2,IE1,IE2,EBP=defaultEBP):
self.core=0
self.pareto=0
self.eq=0
self.p=[None,1]
self.p_weighted=[None,None]
self.u1=u1
self.u2=u2
self.u2_compl=lambda x,y: u2(self.IE[0]-x,self.IE[1]-y)
self.IE1=IE1
self.IE2=IE2
self.IE=[IE1[0]+IE2[0],IE1[1]+IE2[1]]
self.EBP=EBP
self.dt=min(self.IE)/float(EBP.N)
self.X=np.linspace(self.dt,self.IE[0]-self.dt,EBP.N)
self.Y=np.linspace(self.dt,self.IE[1]-self.dt,EBP.N)
self.calc_init()
self.calc()
def calc(self):
"""
calculate all solutions of the box
"""
self.calc_pareto()
self.calc_core()
self.calc_eq()
self.calc_budget()
def calc_init(self):
self.u1(*self.IE1)
self.UIE1=self.u1(*self.IE1) # utility of the 1-st player at her initial endowment
self.UIE2=self.u2(*self.IE2) # utility of the 2-nd player at her initial endowment
self.u_ie_1=lambda x: root(lambda y: self.u1(x,y)-self.UIE1,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 1-st participant
self.u_ie_2=lambda x: root(lambda y: self.u2(x,y)-self.UIE2,self.Y[0],self.Y[-1]) # utility function at initial endowment of the 2-nd participant
self.u_ie_2_compl=lambda x: -self.u_ie_2(self.IE[0]-x)+self.IE[1] # utility function at initial endowment of the 2-nd participant in terms of the 1-st
U1 = list(map(lambda x: correct(x,f_None(self.u_ie_1,x),self.u1,self.UIE1),self.X))
U2 = list(map(lambda x: correct(x,f_None(self.u_ie_2_compl,x),self.u2_compl,self.UIE2),self.X))
self.U1 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1)))
self.U2 = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2)))
U1_sort = sorted(self.U1,key=lambda x: x[1])
U2_sort = sorted(self.U2,key=lambda x: x[1])
if len(U1_sort)>0:
self.U1_min=U1_sort[0]
self.U1_max=U1_sort[-1]
else:
self.U1_min=None
self.U1_max=None
if len(U2_sort)>0:
self.U2_min=U2_sort[0]
self.U2_max=U2_sort[-1]
else:
self.U2_min=None
self.U2_max=None
self._B=lambda x,y,p: y-(p*(self.IE1[0]-x)+self.IE1[1]) # budget constraint
def calc_pareto(self):
self.MRS1=MRS(self.u1) # marginal rate of substitution of the 1st participant
self.MRS2=MRS(self.u2) # marginal rate of substitution of the 2nd participant
self._pareto=lambda x: root(lambda y: _(self.MRS1,x,y)-_(self.MRS2,self.IE[0]-x,self.IE[1]-y),self.Y[0],self.Y[-1]) # Pareto solutions in functional form
P = list(map(lambda x: f_None(self._pareto,x),self.X[1:-1]))
self.PARETO=list(zip(self.X[1:-1],P)) # set of some Pareto solution points (enough to draw it)
self._Bx=lambda x: root(lambda y: self._B(x,y,self.MRS1(x,y)),self.Y[0],self.Y[-1])
#plot_pareto,=plt.plot(X,P,linewidth=2)
PU1_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_1,x),self.U1_min[0],self.U1_max[0])
PU2_X=root(lambda x: _(self._pareto,x)-_(self.u_ie_2_compl,x),self.U2_min[0],self.U2_max[0])
PU1_Y=self.u_ie_1(PU1_X)
PU2_Y=self.u_ie_2_compl(PU2_X)
self.PU1=[PU1_X,PU1_Y]
self.PU2=[PU2_X,PU2_Y]
self._Bx=lambda x: root(lambda y: _(self._B,x,y,_(self.MRS1,x,y)),self.Y[0],self.Y[-1])
def calc_core(self):
CORE_X = list(filter(lambda x: x>=self.PU1[0] and x<=self.PU2[0], self.X))
CORE_Y = list(map(lambda x: self._pareto(x), CORE_X))
self.CORE = list(zip(CORE_X,CORE_Y)) # set of some solutions in the core (could be one, could be many or none)
def calc_eq(self):
EQ_X1=root(lambda x: _(self._pareto,x)-_(self._Bx,x),self.PU1[0],self.PU2[0])
EQ_Y1=self._pareto(EQ_X1)
EQ_X2=self.IE[0]-EQ_X1
EQ_Y2=self.IE[1]-EQ_Y1
self.EQ1=[EQ_X1,EQ_Y1] # equilibrium solution for the 1st participant
self.EQ2=[EQ_X2,EQ_Y2] # equilibrium solution for the 2nd participant
self.p=self.MRS1(*self.EQ1) # price vector
self.p_weighted=[self.p/(self.p+1),1/(self.p+1)]
self.UEQ1=self.u1(*self.EQ1) # value of utility function of the 1st participant at her equilibrium point (functional form)
self.UEQ2=self.u2(*self.EQ2) # value of utility function of the 2nd participant at her equilibrium point (functional form)
self.u_eq_1=lambda x: root(lambda y: self.u1(x,y)-self.UEQ1,self.Y[0],self.Y[-1])
self.u_eq_2=lambda x: root(lambda y: self.u2(x,y)-self.UEQ2,self.Y[0],self.Y[-1])
self.u_eq_2_compl=lambda x: -self.u_eq_2(self.IE[0]-x)+self.IE[1]
U1_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_1,x),self.u1,self.UEQ1),self.X))
U2_EQ = list(map(lambda x: correct(x,f_None(self.u_eq_2_compl,x),self.u2_compl,self.UEQ2),self.X))
self.U1_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U1_EQ)))
self.U2_EQ = list(filter(lambda x: x[0] is not None and x[1] is not None,zip(self.X,U2_EQ)))
def calc_budget(self,price=None):
if price is None:
price=self.p
self.Bp=lambda x: price*self.IE1[0]+self.IE1[1]-price*x # budget line (functional form)
Budget = list(map(self.Bp,self.X)) # set of some points from the budget line
self.BUDGET = list(zip(self.X,Budget))
def plot(self,fname=None):
plot_endow,=plt.plot(self.IE1[0],self.IE1[1],color="white",marker="o")
m=max(self.IE[0],self.IE[1])
plt.axis([0,m,0,m],autoscale=False)
plot_U1,=plt.plot(*unpack(self.U1),color="blue")
plot_U2,=plt.plot(*unpack(self.U2),color="brown")
plot_pareto,=plt.plot(*unpack(self.PARETO),linewidth=2,color="red")
plot_core,=plt.plot(*unpack(self.CORE),color="black",linewidth=4)
plot_U1_EQ,=plt.plot(*unpack(self.U1_EQ),ls='--',color="blue")
plot_U2_EQ,=plt.plot(*unpack(self.U2_EQ),ls='--',color="brown")
plot_budget,=plt.plot(*unpack(self.BUDGET),color="green")
plt.plot(self.PU1[0],self.PU1[1],color="blue",marker="o")
plt.plot(self.PU2[0],self.PU2[1],color="brown",marker="o")
plot_walras,=plt.plot(self.EQ1[0],self.EQ1[1],color="green",marker="o")
# annotation
plt.annotate("(%s;%s)"%(round(self.EQ1[0],2),round(self.EQ1[1],2)), xy=self.EQ1, xytext=(self.EQ1[0]+self.dt,self.EQ1[1]-self.dt))
plt.title("Edgeworth Box")
plt.legend([plot_pareto,plot_U1,plot_U2,plot_endow,plot_core,plot_walras,plot_budget,plot_U1_EQ,plot_U2_EQ]
,["Pareto","U1 before trade","U2 before trade","Init. endow.","Core","Equilibrium","Budget constraint","U1 at eq.","U2 at eq."])
#Axes Dscription
plt.xlabel("Units of 1-st good")
plt.ylabel("Units of 2-nd good")
if fname is not None:
plt.savefig(fname)
plt.close()
else:
plt.show(block=False)
| mit |
ChanChiChoi/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
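# Quantify the "more detail" claim above with training-set errors (a small
# illustrative check, not part of the original example output).
mse_tree = np.mean((y - y_1) ** 2)
mse_boost = np.mean((y - y_2) ** 2)
print("Training MSE -- single tree: %.4f, AdaBoost (300 trees): %.4f" % (mse_tree, mse_boost))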
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
epfl-lts2/pygsp | examples/random_walk.py | 1 | 1806 | r"""
Random walks
============
Probability of a random walker to be on any given vertex after a given number
of steps starting from a given distribution.
"""
# sphinx_gallery_thumbnail_number = 2
import numpy as np
from scipy import sparse
from matplotlib import pyplot as plt
import pygsp as pg
N = 7
steps = [0, 1, 2, 3]
graph = pg.graphs.Grid2d(N)
delta = np.zeros(graph.N)
delta[N//2*N + N//2] = 1
probability = sparse.diags(graph.dw**(-1)).dot(graph.W)
fig, axes = plt.subplots(1, len(steps), figsize=(12, 3))
for step, ax in zip(steps, axes):
state = (probability**step).__rmatmul__(delta) ## = delta @ probability**step
graph.plot(state, ax=ax, title=r'$\delta P^{}$'.format(step))
ax.set_axis_off()
fig.tight_layout()
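# Small sanity check (illustrative addition): the transition matrix is
# row-stochastic, so the walker's distribution still sums to one after the
# last step plotted above.
np.testing.assert_allclose(state.sum(), 1)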
###############################################################################
# Stationary distribution.
graphs = [
pg.graphs.Ring(10),
pg.graphs.Grid2d(5),
pg.graphs.Comet(8, 4),
pg.graphs.BarabasiAlbert(20, seed=42),
]
fig, axes = plt.subplots(1, len(graphs), figsize=(12, 3))
for graph, ax in zip(graphs, axes):
if not hasattr(graph, 'coords'):
graph.set_coordinates(seed=10)
P = sparse.diags(graph.dw**(-1)).dot(graph.W)
# e, u = np.linalg.eig(P.T.toarray())
# np.testing.assert_allclose(np.linalg.inv(u.T) @ np.diag(e) @ u.T,
# P.toarray(), atol=1e-10)
# np.testing.assert_allclose(np.abs(e[0]), 1)
# stationary = np.abs(u.T[0])
e, u = sparse.linalg.eigs(P.T, k=1, which='LR')
np.testing.assert_allclose(e, 1)
stationary = np.abs(u).squeeze()
assert np.all(stationary < 0.71)
colorbar = False if type(graph) is pg.graphs.Ring else True
graph.plot(stationary, colorbar=colorbar, ax=ax, title='$xP = x$')
ax.set_axis_off()
fig.tight_layout()
| bsd-3-clause |
ewels/genomics-status | status/production.py | 2 | 10980 | """ Handlers related to data production.
"""
from collections import OrderedDict
import cStringIO
from datetime import datetime
import json
from dateutil import parser
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import tornado.web
from status.util import dthandler, SafeHandler
from dateutil import parser
class ProductionCronjobsHandler(SafeHandler):
""" Returns a JSON document with the Cronjobs database information
"""
def get(self):
cronjobs = {}
servers = self.application.cronjobs_db.view('server/alias')
for server in servers.rows:
doc = self.application.cronjobs_db.get(server.value)
cronjobs[server.key] = {"last_updated": datetime.strftime(parser.parse(doc['Last updated']), '%Y-%m-%d %H:%M'),
'users': doc['users'], 'server': server.key}
template = self.application.loader.load("cronjobs.html")
self.write(template.generate(gs_globals=self.application.gs_globals,
cronjobs=cronjobs))
class DeliveredMonthlyDataHandler(SafeHandler):
""" Gives the data for monthly delivered amount of basepairs.
Loaded through /api/v1/delivered_monthly url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
def delivered(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.projects_db.view("date/m_bp_delivered",
group_level=3)
delivered = OrderedDict()
start = [start_date.year,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = row.key[0]
m = row.key[2]
delivered[dthandler(datetime(y, m, 1))] = int(row.value * 1e6)
return delivered
class DeliveredMonthlyPlotHandler(DeliveredMonthlyDataHandler):
""" Gives a bar plot for monthly delivered amount of basepairs.
Loaded through /api/v1/delivered_monthly.png url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
delivered = self.delivered(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in delivered.keys()]
values = delivered.values()
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels([d.strftime("%Y\n%B") for d in dates])
ax.set_title("Basepairs delivered per month")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
delivered = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(delivered))
self.write(delivered)
class DeliveredQuarterlyDataHandler(SafeHandler):
""" Gives the data for quarterly delivered amount of basepairs.
Loaded through /api/v1/delivered_quarterly url
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.delivered(start_date, end_date), default=dthandler))
def delivered(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.projects_db.view("date/m_bp_delivered",
group_level=2)
delivered = OrderedDict()
start = [start_date.year,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = row.key[0]
q = row.key[1]
delivered[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value * 1e6)
return delivered
class DeliveredQuarterlyPlotHandler(DeliveredQuarterlyDataHandler):
""" Gives a bar plot for quarterly delivered amount of basepairs.
Loaded through /api/v1/delivered_quarterly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
delivered = self.delivered(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in delivered.keys()]
values = delivered.values()
ax.bar(dates, values)
ax.set_xticks(dates)
labels = []
for d in dates:
labels.append("{}\nQ{}".format(d.year, (d.month - 1) // 3 + 1))
ax.set_xticklabels(labels)
ax.set_title("Basepairs delivered per quarter")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
delivered = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(delivered))
self.write(delivered)
class ProducedMonthlyDataHandler(SafeHandler):
""" Serves the amount of data produced per month.
Loaded through /api/v1/produced_monthly
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.bpcounts(start_date, end_date), default=dthandler))
def bpcounts(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.samples_db.view("barcodes/date_read_counts",
group_level=3)
produced = OrderedDict()
start = [start_date.year - 2000,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year - 2000,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = int("20" + str(row.key[0]))
m = row.key[2]
produced[dthandler(datetime(y, m, 1))] = row.value
return produced
class ProducedMonthlyPlotHandler(ProducedMonthlyDataHandler):
""" Serves a plot of amount of data produced per month.
Loaded through /api/v1/produced_monthly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
produced = self.bpcounts(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in produced.keys()]
values = produced.values()
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels([d.strftime("%b-%Y") for d in dates], rotation=30)
ax.set_title("Basepairs produced per month")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
produced = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(produced))
self.write(produced)
class ProducedQuarterlyDataHandler(SafeHandler):
""" Gives the data for quarterly produced amount of basepairs.
Loaded through /api/v1/produced_quarterly
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
self.set_header("Content-type", "application/json")
self.write(json.dumps(self.produced(start_date, end_date), default=dthandler))
def produced(self, start_date=None, end_date=None):
if start_date:
start_date = parser.parse(start_date)
if end_date:
end_date = parser.parse(end_date)
else:
end_date = datetime.now()
view = self.application.samples_db.view("barcodes/date_read_counts",
group_level=2)
produced = OrderedDict()
start = [start_date.year - 2000,
(start_date.month - 1) // 3 + 1,
start_date.month,
start_date.day]
end = [end_date.year - 2000,
(end_date.month - 1) // 3 + 1,
end_date.month,
end_date.day]
for row in view[start:end]:
y = int("20" + str(row.key[0]))
q = row.key[1]
produced[dthandler(datetime(y, (q - 1) * 3 + 1, 1))] = int(row.value)
return produced
class ProducedQuarterlyPlotHandler(ProducedQuarterlyDataHandler):
""" Gives a bar plot for quarterly produced amount of basepairs.
Loaded through /api/v1/produced_quarterly.png
"""
def get(self):
start_date = self.get_argument('start', '2012-01-01T00:00:00')
end_date = self.get_argument('end', None)
produced = self.produced(start_date, end_date)
fig = plt.figure(figsize=[10, 8])
ax = fig.add_subplot(111)
dates = [parser.parse(d) for d in produced.keys()]
values = produced.values()
quarters = [(d.month - 1) // 3 + 1 for d in dates]
years = [d.year for d in dates]
ax.bar(dates, values, width=10)
ax.set_xticks(dates)
ax.set_xticklabels(["{}\nQ{}".format(*t) for t in zip(years, quarters)])
ax.set_title("Basepairs produced per quarter")
FigureCanvasAgg(fig)
buf = cStringIO.StringIO()
fig.savefig(buf, format="png")
produced = buf.getvalue()
self.set_header("Content-Type", "image/png")
self.set_header("Content-Length", len(produced))
self.write(produced)
| mit |
wateraccounting/SEBAL | PreSEBAL/preSEBAL.py | 1 | 110216 | # -*- coding: utf-8 -*-
"""
Created on Thu Sep 08 15:09:49 2016
#test Github
@author: tih
"""
import numpy as np
import os
import scipy.interpolate
import gdal
from openpyxl import load_workbook
import osr
from datetime import datetime, timedelta
import pandas as pd
import shutil
import glob
from netCDF4 import Dataset
import warnings
import SEBAL.pySEBAL.pySEBAL_code as SEBAL
def main():
####################################################################################################################
############################################# CREATE INPUT FOR SEBAL RUN ###########################################
####################################################################################################################
####################################################################################################################
##################################################### PreHANTS ####################################################
####################################################################################################################
# PreHANTS
# Part 1: Define input by user
# Part 2: Set parameters and output folder
# Part 3: RUN SEBAL
# Part 4: HANTS
# Part 5: post HANTS
# Part 6: Write output
####################################################################################################################
################################################# PreHANTS part 1 ##################################################
####################################################################################################################
VegetationExcel =r"E:\Project_2\UAE\Excel\Excel_PreSEBAL_v1_0.xlsx" # This excel defines the p and c factor and vegetation height.
####################################################################################################################
################################################# PreHANTS part 2 ##################################################
####################################################################################################################
# Open Excel workbook used for Vegetation c and p factor conversions
wb_veg = load_workbook(VegetationExcel, data_only=True)
ws_veg = wb_veg['General_Input']
# Input for preSEBAL.py
start_date = "%s" %str(ws_veg['B2'].value)
end_date = "%s" %str(ws_veg['B3'].value)
inputExcel= r"%s" %str(ws_veg['B4'].value) # The excel with all the SEBAL input data
LU_data_FileName = r"%s" %str(ws_veg['B5'].value) # Path to Land Use map
output_folder = r"%s" %str(ws_veg['B7'].value)
# optional parameter
DSSF_Folder= r"%s" %str(ws_veg['B6'].value)
######################## Load Excels ##########################################
# Open Excel workbook for SEBAL inputs
wb = load_workbook(inputExcel, data_only=True)
# Get length of EXCEL sheet
ws = wb['General_Input']
ws2 = wb['VIIRS_PROBAV_Input']
endExcel=int(ws.max_row)
# Create Dict
SEBAL_RUNS = dict()
for number in range(2,endExcel+1):
input_folder_SEBAL = str(ws['B%d' % number].value)
output_folder_SEBAL = str(ws['C%d' % number].value)
Image_Type = int(ws['D%d' % number].value)
PROBA_V_name = str(ws2['D%d' % number].value)
VIIRS_name = str(ws2['B%d' % number].value)
SEBAL_RUNS[number] = {'input_folder': input_folder_SEBAL, 'output_folder': output_folder_SEBAL, 'image_type': Image_Type,'PROBA_V_name': PROBA_V_name,'VIIRS_name': VIIRS_name}
Kind_Of_Runs_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
Kind_Of_Runs_Dict.setdefault(v['image_type'], []).append(k)
######################## Create output folders ##########################################
output_folder_PreSEBAL_SEBAL = os.path.join(output_folder,'PreSEBAL_SEBAL_out')
input_folder_HANTS = os.path.join(output_folder,'HANTS_in')
output_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_out')
temp_folder_PreSEBAL = os.path.join(output_folder,'PreSEBAL_temp')
temp_folder_PreSEBAL_LST = os.path.join(temp_folder_PreSEBAL,'LST')
NDVI_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'NDVI')
Albedo_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Albedo')
WaterMask_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Water_Mask')
LAI_outfolder = os.path.join(output_folder_PreSEBAL,'LAI')
ALBEDO_outfolder_end = os.path.join(output_folder_PreSEBAL,'ALBEDO')
NDVI_outfolder_end = os.path.join(output_folder_PreSEBAL,'NDVI')
WaterMask_outfolder_end = os.path.join(output_folder_PreSEBAL,'Water_Mask')
TRANS_outfolder = os.path.join(output_folder_PreSEBAL,'Transmissivity')
Surface_Temperature_outfolder = os.path.join(output_folder_PreSEBAL_SEBAL,'Surface_Temperature')
output_folder_HANTS_end_sharp = os.path.join(output_folder_PreSEBAL, 'LST_Sharpened')
output_folder_HANTS_end_Veg = os.path.join(output_folder_PreSEBAL, 'Vegetation_Height')
output_folder_p_factor = os.path.join(output_folder_PreSEBAL, 'p_factor')
output_folder_LUE = os.path.join(output_folder_PreSEBAL, 'LUE')
if not os.path.exists(output_folder_PreSEBAL_SEBAL):
os.makedirs(output_folder_PreSEBAL_SEBAL)
if not os.path.exists(output_folder_PreSEBAL):
os.mkdir(output_folder_PreSEBAL)
if not os.path.exists(temp_folder_PreSEBAL):
os.mkdir(temp_folder_PreSEBAL)
if not os.path.exists(NDVI_outfolder):
os.makedirs(NDVI_outfolder)
if not os.path.exists(Albedo_outfolder):
os.makedirs(Albedo_outfolder)
if not os.path.exists(WaterMask_outfolder):
os.makedirs(WaterMask_outfolder)
if not os.path.exists(LAI_outfolder):
os.makedirs(LAI_outfolder)
if not os.path.exists(ALBEDO_outfolder_end):
os.makedirs(ALBEDO_outfolder_end)
if not os.path.exists(NDVI_outfolder_end):
os.makedirs(NDVI_outfolder_end)
if not os.path.exists(WaterMask_outfolder_end):
os.makedirs(WaterMask_outfolder_end)
if not os.path.exists(temp_folder_PreSEBAL_LST):
os.makedirs(temp_folder_PreSEBAL_LST)
if not os.path.exists(Surface_Temperature_outfolder):
os.makedirs(Surface_Temperature_outfolder)
if not os.path.exists(TRANS_outfolder):
os.makedirs(TRANS_outfolder)
if not os.path.exists(output_folder_HANTS_end_sharp):
os.mkdir(output_folder_HANTS_end_sharp)
if not os.path.exists(output_folder_HANTS_end_Veg):
os.mkdir(output_folder_HANTS_end_Veg)
if not os.path.exists(output_folder_p_factor):
os.mkdir(output_folder_p_factor)
if not os.path.exists(output_folder_LUE):
os.mkdir(output_folder_LUE)
# Do not show warnings
warnings.filterwarnings('ignore')
####################################################################################################################
################################################### RUN SEBAL part 3 ###############################################
####################################################################################################################
############################## Define General info ############################
for number in Kind_Of_Runs_Dict[2]: # Number defines the column of the inputExcel
print(number)
if not (SEBAL_RUNS[number]['PROBA_V_name'] == 'None' and SEBAL_RUNS[number]['VIIRS_name'] == 'None'):
Rp = 0.91 # Path radiance in the 10.4-12.5 µm band (W/m2/sr/µm)
tau_sky = 0.866 # Narrow band transmissivity of air, range: [10.4-12.5 µm]
surf_temp_offset = 3 # Surface temperature offset for water
######################## Open General info from SEBAL Excel ###################
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the input and output folder, and Image type from the excel file
input_folder = str(ws['B%d' % number].value)
Image_Type = int(2) # Type of Image (1=Landsat & 2 = VIIRS & GLOBA-V)
# Extract the Path to the DEM map from the excel file
DEM_fileName = '%s' %str(ws['E%d' % number].value) #'DEM_HydroShed_m'
# Open DEM and create Latitude and longitude files
lat,lon,lat_fileName,lon_fileName=SEBAL.DEM_lat_lon(DEM_fileName, temp_folder_PreSEBAL)
######################## Extract general data for Landsat ##########################################
if Image_Type == 1:
# Open the Landsat_Input sheet
ws = wb['Landsat_Input']
# Extract Landsat name, number and amount of thermal bands from excel file
Name_Landsat_Image = str(ws['B%d' % number].value) # From glovis.usgs.gov
Landsat_nr = int(ws['C%d' % number].value) # Type of Landsat (LS) image used (LS5, LS7, or LS8)
Bands_thermal = int(ws['D%d' %number].value) # Number of LS bands to use to retrieve land surface temperature
# Pixel size of the model
pixel_spacing=int(30)
# the path to the MTL file of landsat
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
# read out the general info out of the MTL file in Greenwich Time
year, DOY, hour, minutes, UTM_Zone, Sun_elevation = SEBAL.info_general_metadata(Landsat_meta_fileName) # call definition info_general_metadata
date=datetime.strptime('%s %s'%(year,DOY), '%Y %j')
month = date.month
day = date.day
# define the kind of sensor and resolution of the sensor
sensor1 = 'L%d' % Landsat_nr
sensor2 = 'L%d' % Landsat_nr
sensor3 = 'L%d' % Landsat_nr
res1 = '30m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
# Set the start parameter for determining transmissivity at 0
Determine_transmissivity = 0
######################## Extract general data for VIIRS-PROBAV ##########################################
if Image_Type == 2:
# Open the VIIRS_PROBAV_Input sheet
ws = wb['VIIRS_PROBAV_Input']
# Extract the name of the thermal and quality VIIRS image from the excel file
Name_VIIRS_Image_TB = '%s' %str(ws['B%d' % number].value)
# Extract the name to the PROBA-V image from the excel file
Name_PROBAV_Image = '%s' %str(ws['D%d' % number].value) # Must be a tiff file
# Pixel size of the model
pixel_spacing=int(100)
# UTM Zone of the end results
UTM_Zone = float(ws['G%d' % number].value)
if not Name_VIIRS_Image_TB == 'None':
#Get time from the VIIRS dataset name (IMPORTANT TO KEEP THE TEMPLATE OF THE VIIRS NAME CORRECT example: VIIRS_SVI05_npp_d20161021_t0956294_e1002080_b25822_c20161021160209495952_noaa_ops.tif)
Total_Day_VIIRS = Name_VIIRS_Image_TB.split('_')[3]
Total_Time_VIIRS = Name_VIIRS_Image_TB.split('_')[4]
# Get the information out of the VIIRS name in GMT (Greenwich time)
year = int(Total_Day_VIIRS[1:5])
month = int(Total_Day_VIIRS[5:7])
day = int(Total_Day_VIIRS[7:9])
Startdate = '%d-%02d-%02d' % (year,month,day)
DOY=datetime.strptime(Startdate,'%Y-%m-%d').timetuple().tm_yday
hour = int(Total_Time_VIIRS[1:3])
minutes = int(Total_Time_VIIRS[3:5])
# If this is runned correctly, we can determine transmissivity
ws = wb['Meteo_Input']
Field_Radiation_24 = '%s' %str(ws['J%d' % number].value)
Field_Trans_24 = '%s' %str(ws['K%d' % number].value)
Determine_transmissivity = 1
# else use the PROBA-V day, but then no transmissivity can be determined for now
else:
# Get the day and time from the PROBA-V
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name = '%d%02d%02d' %(year, month, day)
DOY=datetime.strptime(Var_name,'%Y%m%d').timetuple().tm_yday
# We cannot determine transmissivity
Determine_transmissivity = 0
# Determine the transmissivity if possible (Determine_transmissivity = 1)
if Determine_transmissivity == 1:
# Rounded difference of the local time from Greenwich (GMT) (hours):
delta_GTM = round(np.sign(lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)]) * lon[int(np.shape(lon)[0]/2), int(np.shape(lon)[1]/2)] * 24 / 360)
if np.isnan(delta_GTM) == True:
delta_GTM = round(np.sign(np.nanmean(lon)) * np.nanmean(lon) * 24 / 360)
# Calculate local time
hour += delta_GTM
if hour < 0.0:
day -= 1
hour += 24
if hour >= 24:
day += 1
hour -= 24
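# Worked example of the offset above (illustrative values only): for a scene
# centred near lon = 55 deg E, delta_GTM = round(55 * 24 / 360) = 4 hours,
# so a 09:56 GMT VIIRS overpass is treated as 13:56 local time.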
# define the kind of sensor and resolution of the sensor
sensor1 = 'PROBAV'
sensor2 = 'VIIRS'
res1 = '375m'
res2 = '%sm' %int(pixel_spacing)
res3 = '30m'
######################## Extract general data from DEM file and create Slope map ##########################################
# Variable date name
Var_name = '%d%02d%02d' %(year, month, day)
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - Original DEM coordinates is Geographic: lat, lon
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1) # Get the reprojected dem band
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
# Read out the DEM band and print the DEM properties
data_DEM = band.ReadAsArray(0, 0, ncol, nrow)
# 2) Latitude file - reprojection
# reproject latitude to the landsat projection and save as tiff file
lat_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
lat_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected latitude data
lat_proy = lat_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# 3) Longitude file - reprojection
# reproject longitude to the UTM projection of the model and save as a tiff file
lon_rep, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(lon_fileName, pixel_spacing, UTM_Zone=UTM_Zone)
# Get the reprojected longitude data
lon_proy = lon_rep.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
lon_fileName = os.path.join(temp_folder_PreSEBAL,'lon_resh.tif')
SEBAL.save_GeoTiff_proy(dest, lon_proy, lon_fileName, shape, nband=1)
# Calculate slope and aspect from the reprojected DEM
deg2rad,rad2deg,slope,aspect=SEBAL.Calc_Gradient(data_DEM, pixel_spacing)
if Determine_transmissivity == 1:
# calculate the cos(zenith) angle
Ra_mountain_24, Ra_inst, cos_zn_resh, dr, phi, delta = SEBAL.Calc_Ra_Mountain(lon,DOY,hour,minutes,lon_proy,lat_proy,slope,aspect)
cos_zn_fileName = os.path.join(temp_folder_PreSEBAL,'cos_zn.tif')
SEBAL.save_GeoTiff_proy(dest, cos_zn_resh, cos_zn_fileName, shape, nband=1)
# Save the Ra
Ra_inst_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_inst.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_inst, Ra_inst_fileName, shape, nband=1)
Ra_mountain_24_fileName = os.path.join(temp_folder_PreSEBAL,'Ra_mountain_24.tif')
SEBAL.save_GeoTiff_proy(dest, Ra_mountain_24, Ra_mountain_24_fileName, shape, nband=1)
#################### Calculate Transmissivity ##########################################
# Open the General_Input sheet
ws = wb['Meteo_Input']
# Extract the method radiation value
Value_Method_Radiation_inst = '%s' %str(ws['L%d' % number].value)
# Values to check if data is created
Check_Trans_inst = 0
Check_Trans_24 = 0
''' This is now turned off, so you need to fill in the instantaneous transmissivity or radiation
# Extract the data to the method of radiation
if int(Value_Method_Radiation_inst) == 2:
Field_Radiation_inst = '%s' %str(ws['N%d' % number].value)
if Field_Radiation_inst == 'None':
# Instantaneous transmissivity files must be created
Check_Trans_inst = 1
# Calculate Transmissivity
quarters_hours = np.ceil(minutes/30.) * 30
hours_GMT = hour - delta_GTM
if quarters_hours >= 60:
hours_GMT += 1
quarters_hours = 0
# Define the instantaneous LANDSAF file
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year, month,day, hours_GMT, quarters_hours)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Reproject the Ra_inst data to match the LANDSAF data
Ra_inst_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_inst_fileName, file_Landsaf_inst, method = 1)
Ra_inst_3Km = Ra_inst_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_inst_3Km[Ra_inst_3Km==0] = np.nan
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_inst_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_inst_3Km = np.float_(Rs_inst_3Km)/10
Rs_inst_3Km[Rs_inst_3Km<0]=np.nan
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_3Km = Rs_inst_3Km/Ra_inst_3Km
Transmissivity_3Km_fileName = os.path.join(output_folder_temp,'Transmissivity_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_inst_3Km_dest, Transmissivity_3Km, Transmissivity_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_inst_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_3Km_fileName, cos_zn_fileName, method = 3)
Transmissivity_inst = Transmissivity_inst_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_inst[Transmissivity_inst>0.98] = 0.98
Transmissivity_inst_fileName = os.path.join(TRANS_outfolder,'Transmissivity_inst_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_inst_dest, Transmissivity_inst, Transmissivity_inst_fileName, shape, nband=1)
'''
# Extract the method radiation value
Value_Method_Radiation_24 = '%s' %str(ws['I%d' % number].value)
# Extract the data to the method of radiation
if int(Value_Method_Radiation_24) == 2:
Field_Radiation_24 = '%s' %str(ws['K%d' % number].value)
if Field_Radiation_24 == 'None':
# Daily Transmissivity files must be created
Check_Trans_24 = 1
# Create times that are needed to calculate daily Rs (LANDSAF)
Starttime_GMT = datetime.strptime(Startdate,'%Y-%m-%d') + timedelta(hours=-delta_GTM)
Endtime_GMT = Starttime_GMT + timedelta(days=1)
Times = pd.date_range(Starttime_GMT, Endtime_GMT,freq = '30min')
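# Note: a 30-minute range over a full day gives 49 timestamps; the loop below
# uses Times[:-1], i.e. 48 half-hourly LANDSAF slots, and Rs_24_3Km is the
# plain mean of those 48 instantaneous rasters.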
for Time in Times[:-1]:
year_LANDSAF = Time.year
month_LANDSAF = Time.month
day_LANDSAF = Time.day
hour_LANDSAF = Time.hour
min_LANDSAF = Time.minute
# Define the instantaneous LANDSAF file
#re = glob.glob('')
name_Landsaf_inst = 'HDF5_LSASAF_MSG_DSSF_MSG-Disk_%d%02d%02d%02d%02d.tif' %(year_LANDSAF, month_LANDSAF,day_LANDSAF, hour_LANDSAF, min_LANDSAF)
file_Landsaf_inst = os.path.join(DSSF_Folder,name_Landsaf_inst)
# Open the Rs LANDSAF data
dest_Rs_inst_3Km = gdal.Open(file_Landsaf_inst)
Rs_one_3Km = dest_Rs_inst_3Km.GetRasterBand(1).ReadAsArray()
Rs_one_3Km = np.float_(Rs_one_3Km)/10
Rs_one_3Km[Rs_one_3Km < 0]=np.nan
if Time == Times[0]:
Rs_24_3Km_tot = Rs_one_3Km
else:
Rs_24_3Km_tot += Rs_one_3Km
Rs_24_3Km = Rs_24_3Km_tot / len(Times[:-1])
# Reproject the Ra_inst data to match the LANDSAF data
Ra_24_3Km_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Ra_mountain_24_fileName, file_Landsaf_inst, method = 3)
Ra_24_3Km = Ra_24_3Km_dest.GetRasterBand(1).ReadAsArray()
Ra_24_3Km[Ra_24_3Km==0] = np.nan
# Do gapfilling
Ra_24_3Km = gap_filling(Ra_24_3Km,np.nan)
# Get shape LANDSAF data
shape_trans=[dest_Rs_inst_3Km.RasterXSize , dest_Rs_inst_3Km.RasterYSize ]
# Calculate Transmissivity 3Km
Transmissivity_24_3Km = Rs_24_3Km/Ra_24_3Km
Transmissivity_24_3Km_fileName = os.path.join(temp_folder_PreSEBAL,'Transmissivity_24_3Km.tif')
SEBAL.save_GeoTiff_proy(Ra_24_3Km_dest, Transmissivity_24_3Km, Transmissivity_24_3Km_fileName, shape_trans, nband=1)
# Reproject Transmissivity to match DEM (now this is done by using the nearest neighbour method)
Transmissivity_24_dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Transmissivity_24_3Km_fileName, lon_fileName, method = 3)
Transmissivity_24 = Transmissivity_24_dest.GetRasterBand(1).ReadAsArray()
Transmissivity_24[Transmissivity_24>0.98] = 0.98
Transmissivity_24_fileName = os.path.join(TRANS_outfolder,'Transmissivity_24_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(Transmissivity_24_dest, Transmissivity_24, Transmissivity_24_fileName, shape, nband=1)
#################### Calculate NDVI for LANDSAT ##########################################
if Image_Type == 1:
# Define bands used for each Landsat number
if Landsat_nr == 5 or Landsat_nr == 7:
Bands = np.array([1, 2, 3, 4, 5, 7, 6])
elif Landsat_nr == 8:
Bands = np.array([2, 3, 4, 5, 6, 7, 10, 11])
else:
print('Landsat image not supported, use Landsat 5, 7 or 8')
# Open MTL landsat and get the correction parameters
Landsat_meta_fileName = os.path.join(input_folder, '%s_MTL.txt' % Name_Landsat_Image)
Lmin, Lmax, k1_c, k2_c = SEBAL.info_band_metadata(Landsat_meta_fileName, Bands)
# Mean solar exo-atmospheric irradiance for each band (W/m2/microm)
# for the different Landsat images (L5, L7, or L8)
ESUN_L5 = np.array([1983, 1796, 1536, 1031, 220, 83.44])
ESUN_L7 = np.array([1997, 1812, 1533, 1039, 230.8, 84.9])
ESUN_L8 = np.array([1973.28, 1842.68, 1565.17, 963.69, 245, 82.106])
# Open one band - to get the metadata of the landsat images only once (to get the extent)
src_FileName = os.path.join(input_folder, '%s_B2.TIF' % Name_Landsat_Image) # before 10!
ls,band_data,ulx,uly,lrx,lry,x_size_ls,y_size_ls = SEBAL.Get_Extend_Landsat(src_FileName)
# Crop the Landsat images to the DEM extent -
dst_FileName = os.path.join(temp_folder_PreSEBAL,'cropped_LS_b2.tif') # Before 10 !!
# Clip the landsat image to match the DEM map
lsc, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(src_FileName, lon_fileName)
data_LS = lsc.GetRasterBand(1).ReadAsArray()
SEBAL.save_GeoTiff_proy(dest, data_LS, dst_FileName, shape, nband=1)
# Get the extent of the remaining landsat file after clipping based on the DEM file
lsc,band_data,ulx,uly,lrx,lry,x_size_lsc,y_size_lsc = SEBAL.Get_Extend_Landsat(dst_FileName)
# Create the corrected signals of Landsat in 1 array
Reflect = SEBAL.Landsat_Reflect(Bands,input_folder,Name_Landsat_Image,output_folder,shape,Lmax,Lmin,ESUN_L5,ESUN_L7,ESUN_L8,cos_zn_resh,dr,Landsat_nr, cos_zn_fileName)
# Calculate temporal water mask
water_mask_temp=SEBAL.Water_Mask(shape,Reflect)
# Calculate NDVI
NDVI = SEBAL.Calc_NDVI(Reflect)
# Calculate albedo
albedo = SEBAL.Calc_albedo(Reflect)
# Save NDVI
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
# Save albedo
albedo_FileName = os.path.join(Albedo_outfolder,'Albedo_LS_%s.tif'%Var_name)
SEBAL.save_GeoTiff_proy(dest, albedo, albedo_FileName, shape, nband=1)
################### Extract Meteo data for Landsat days from SEBAL Excel ##################
# Open the Meteo_Input sheet
ws = wb['Meteo_Input']
# ---------------------------- Instantaneous Air Temperature ------------
# Open meteo data, first try to open as value, otherwise as string (path)
try:
Temp_inst = float(ws['B%d' %number].value) # Instantaneous Air Temperature (°C)
# if the data is not a value, then open it as a string (path)
except:
Temp_inst_name = '%s' %str(ws['B%d' %number].value)
Temp_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'Temp_inst_input.tif')
Temp_inst = SEBAL.Reshape_Reproject_Input_data(Temp_inst_name, Temp_inst_fileName, lon_fileName)
try:
RH_inst = float(ws['D%d' %number].value) # Instantaneous Relative humidity (%)
# if the data is not a value, then open it as a string (path)
except:
RH_inst_name = '%s' %str(ws['D%d' %number].value)
RH_inst_fileName = os.path.join(temp_folder_PreSEBAL, 'RH_inst_input.tif')
RH_inst = SEBAL.Reshape_Reproject_Input_data(RH_inst_name, RH_inst_fileName, lon_fileName)
esat_inst = 0.6108 * np.exp(17.27 * Temp_inst / (Temp_inst + 237.3))
eact_inst = RH_inst * esat_inst / 100
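# The two lines above follow the Tetens form for saturated vapour pressure (kPa).
# Worked example with assumed inputs: Temp_inst = 25 C and RH_inst = 60 % give
# esat_inst = 0.6108 * exp(431.75 / 262.3) ~= 3.17 kPa and eact_inst ~= 1.90 kPa.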
#################### Calculate NDVI for VIIRS-PROBAV ##########################################
if Image_Type == 2:
if Name_PROBAV_Image == 'None':
offset_all = [-1, 1, -2, 2, -3, 3,-4, 4,-5 ,5 ,-6 , 6, -7, 7, -8, 8]
found_Name_PROBAV_Image = 0
for offset in offset_all:
if found_Name_PROBAV_Image == 1:
continue
else:
try:
Name_PROBAV_Image = SEBAL_RUNS[number + offset]['PROBA_V_name']
if not Name_PROBAV_Image == 'None':
found_Name_PROBAV_Image = 1
except:
pass
# Get the day and time from the PROBA-V
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name_2 = '%d%02d%02d' %(year, month, day)
# Define the output name
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name_2)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name_2)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name_2)
else:
NDVI_FileName = os.path.join(NDVI_outfolder,'NDVI_PROBAV_%s.tif' %Var_name)
Albedo_FileName = os.path.join(Albedo_outfolder, 'Albedo_PROBAV_%s.tif' %Var_name)
water_mask_temp_FileName = os.path.join(WaterMask_outfolder, 'Water_Mask_PROBAV_%s.tif' %Var_name)
# Only generate the vegetation maps if they do not exist yet
if not os.path.exists(NDVI_FileName):
# Define the bands that will be used
bands=['SM', 'B1', 'B2', 'B3', 'B4'] #'SM', 'BLUE', 'RED', 'NIR', 'SWIR'
# Set the index number at 0
index=0
# create a zero array with the shape of the reprojected DEM file
data_PROBAV=np.zeros((shape[1], shape[0]))
spectral_reflectance_PROBAV=np.zeros([shape[1], shape[0], 5])
# constants
n188_float=248 # currently 248; it is not entirely clear what this value represents or whether it is constant for all images
# write the data one by one to the spectral_reflectance_PROBAV
for bandnmr in bands:
# Map the PROBA-V band names to the corresponding HDF5 subdataset index
Band_number = {'SM':7,'B1':8,'B2':10,'B3':9,'B4':11}
# Open the dataset
Band_PROBAVhdf_fileName = os.path.join(input_folder, '%s.HDF5' % (Name_PROBAV_Image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
# define data if it is not there yet
if 'Var_name' not in locals():
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Var_name = '%d%02d%02d' %(year, month, day)
# Open the .hdf file
name_out = os.path.join(input_folder, '%s_test.tif' % (Name_PROBAV_Image))
name_in = g.GetSubDatasets()[Band_number[bandnmr]][0]
# Get environmental variable
SEBAL_env_paths = os.environ["SEBAL"].split(';')
GDAL_env_path = SEBAL_env_paths[0]
GDAL_TRANSLATE = os.path.join(GDAL_env_path, 'gdal_translate.exe')
# run gdal translate command
FullCmd = '%s -of GTiff %s %s' %(GDAL_TRANSLATE, name_in, name_out)
SEBAL.Run_command_window(FullCmd)
# Open data
dest_PV = gdal.Open(name_out)
Data = dest_PV.GetRasterBand(1).ReadAsArray()
dest_PV = None
# Remove temporary file
os.remove(name_out)
# Define the x and y spacing
Meta_data = g.GetMetadata()
Lat_Bottom = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LATITUDE'])
Lat_Top = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LATITUDE'])
Lon_Left = float(Meta_data['LEVEL3_GEOMETRY_BOTTOM_LEFT_LONGITUDE'])
Lon_Right = float(Meta_data['LEVEL3_GEOMETRY_TOP_RIGHT_LONGITUDE'])
Pixel_size = float((Meta_data['LEVEL3_GEOMETRY_VNIR_VAA_MAPPING']).split(' ')[-3])
# Define the georeference of the PROBA-V data
geo_PROBAV=[Lon_Left-0.5*Pixel_size, Pixel_size, 0, Lat_Top+0.5*Pixel_size, 0, -Pixel_size] #0.000992063492063
# Define the name of the output file
PROBAV_data_name=os.path.join(input_folder, '%s_%s.tif' % (Name_PROBAV_Image,bandnmr))
dst_fileName=os.path.join(input_folder, PROBAV_data_name)
# create gtiff output with the PROBA-V band
fmt = 'GTiff'
driver = gdal.GetDriverByName(fmt)
dst_dataset = driver.Create(dst_fileName, int(Data.shape[1]), int(Data.shape[0]), 1,gdal.GDT_Float32)
dst_dataset.SetGeoTransform(geo_PROBAV)
# set the reference info
srs = osr.SpatialReference()
srs.SetWellKnownGeogCS("WGS84")
dst_dataset.SetProjection(srs.ExportToWkt())
# write the array in the geotiff band
dst_dataset.GetRasterBand(1).WriteArray(Data)
dst_dataset = None
# Open the PROBA-V band in SEBAL
g=gdal.Open(PROBAV_data_name.replace("\\","/"))
# If the data cannot be opened, change the extension
if g is None:
PROBAV_data_name=os.path.join(input_folder, '%s_%s.tiff' % (Name_PROBAV_Image,bandnmr))
# Reproject the PROBA-V band to match DEM's resolution
PROBAV, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(
PROBAV_data_name, lon_fileName)
# Open the reprojected PROBA-V band data
data_PROBAV_DN = PROBAV.GetRasterBand(1).ReadAsArray(0, 0, ncol, nrow)
# Define the filename to store the cropped PROBA-V band
dst_FileName = os.path.join(output_folder, 'Output_PROBAV','proy_PROBAV_%s.tif' % bandnmr)
# close the PROBA-V
g=None
# If the band is not SM, convert the DN values into PROBA-V reflectance values and write them into spectral_reflectance_PROBAV
if bandnmr != 'SM':
data_PROBAV[:, :]=data_PROBAV_DN/2000
spectral_reflectance_PROBAV[:, :, index]=data_PROBAV[:, :]
# If the band is the SM band, then write the data into spectral_reflectance_PROBAV and create the cloud mask
else:
data_PROBAV[:, :]=data_PROBAV_DN
Cloud_Mask_PROBAV=np.zeros((shape[1], shape[0]))
Cloud_Mask_PROBAV[data_PROBAV[:,:]!=n188_float]=1
spectral_reflectance_PROBAV[:, :, index]=Cloud_Mask_PROBAV
# Change the spectral reflectance to meet certain limits
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]<=0,np.nan,spectral_reflectance_PROBAV[:, :, index])
spectral_reflectance_PROBAV[:, :, index]=np.where(spectral_reflectance_PROBAV[:, :, index]>=150,np.nan,spectral_reflectance_PROBAV[:, :, index])
# Go to the next index
index=index+1
# Bands in PROBA-V spectral reflectance
# 0 = SM
# 1 = BLUE
# 2 = RED
# 3 = NIR
# 4 = SWIR
# Calculate surface albedo based on PROBA-V
Surface_Albedo_PROBAV = 0.219 * spectral_reflectance_PROBAV[:, :, 1] + 0.361 * spectral_reflectance_PROBAV[:, :, 2] + 0.379 * spectral_reflectance_PROBAV[:, :, 3] + 0.041 * spectral_reflectance_PROBAV[:, :, 4]
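# The weights above combine the BLUE, RED, NIR and SWIR reflectances (indices
# 1-4) into a broadband surface albedo. Hypothetical example: reflectances of
# 0.05, 0.08, 0.35 and 0.20 give 0.219*0.05 + 0.361*0.08 + 0.379*0.35 +
# 0.041*0.20 ~= 0.18.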
# Calculate the NDVI based on PROBA-V
n218_memory = spectral_reflectance_PROBAV[:, :, 2] + spectral_reflectance_PROBAV[:, :, 3]
NDVI = np.zeros((shape[1], shape[0]))
NDVI[n218_memory != 0] = ( spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] - spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] )/ ( spectral_reflectance_PROBAV[:, :, 2][n218_memory != 0] + spectral_reflectance_PROBAV[:, :, 3][n218_memory != 0] )
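# This is NDVI = (NIR - RED) / (NIR + RED), computed only where the sum is
# non-zero. Hypothetical example: NIR = 0.35 and RED = 0.08 give
# NDVI = 0.27 / 0.43 ~= 0.63 (dense vegetation), while water and bare soil
# stay close to or below zero.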
# Create Water mask based on PROBA-V
water_mask_temp = np.zeros((shape[1], shape[0]))
water_mask_temp[np.logical_and(np.logical_and(NDVI<0.1,data_DEM>0),Surface_Albedo_PROBAV<0.2)]=1
# Save Albedo for PROBA-V
SEBAL.save_GeoTiff_proy(dest, Surface_Albedo_PROBAV, Albedo_FileName, shape, nband=1)
# Save NDVI for PROBA-V
SEBAL.save_GeoTiff_proy(dest, NDVI, NDVI_FileName, shape, nband=1)
# Save Water Mask for PROBA-V
SEBAL.save_GeoTiff_proy(dest, water_mask_temp, water_mask_temp_FileName, shape, nband=1)
else:
dest_NDVI = gdal.Open(NDVI_FileName)
dest_water_mask_temp = gdal.Open(water_mask_temp_FileName)
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
water_mask_temp = dest_water_mask_temp.GetRasterBand(1).ReadAsArray()
############################ Calculate LAI ##########################################
# Calculate the LAI
FPAR,tir_emis,Nitrogen,vegt_cover,LAI,b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask_temp,shape)
# Create LAI name
if Image_Type == 1:
LAI_FileName = os.path.join(LAI_outfolder,'LAI_LS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
#################### Calculate thermal for Landsat ##########################################
if Image_Type == 1:
# Calculate thermal
therm_data = SEBAL.Landsat_therm_data(Bands,input_folder,Name_Landsat_Image,output_folder,ulx_dem,lry_dem,lrx_dem,uly_dem,shape)
# Calculate surface temperature
Surface_temp=SEBAL.Calc_surface_water_temp(Temp_inst,Landsat_nr,Lmax,Lmin,therm_data,b10_emissivity,k1_c,k2_c,eact_inst,shape,water_mask_temp,Bands_thermal,Rp,tau_sky,surf_temp_offset,Image_Type)
# Save surface temperature
therm_data_FileName = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_LS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, Surface_temp, therm_data_FileName, shape, nband=1)
################################## Calculate VIIRS surface temperature ########################
if Image_Type == 2:
# If there is VIIRS data
if not Name_VIIRS_Image_TB == 'None':
# Define the VIIRS thermal data name
VIIRS_data_name=os.path.join(input_folder, '%s' % (Name_VIIRS_Image_TB))
# Reproject VIIRS thermal data
VIIRS, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(VIIRS_data_name, lon_fileName)
# Open VIIRS thermal data
data_VIIRS = VIIRS.GetRasterBand(1).ReadAsArray()
# Set the conditions for the brightness temperature (100m)
brightness_temp=np.where(data_VIIRS>=250, data_VIIRS, np.nan)
# Constants
k1=606.399172
k2=1258.78
L_lambda_b10_100=((2*6.63e-34*(3.0e8)**2)/((11.45e-6)**5*(np.exp((6.63e-34*3e8)/(1.38e-23*(11.45e-6)*brightness_temp))-1)))*1e-6
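# The expression above converts the VIIRS brightness temperature into a spectral
# radiance with Planck's law at an effective wavelength of 11.45 um
# (h = 6.63e-34 J s, c = 3.0e8 m/s, k = 1.38e-23 J/K), scaled by 1e-6 to
# W/m2/sr/um; k1 and k2 are presumably the calibration constants that
# SEBAL.Get_Thermal uses to turn the corrected radiance back into a temperature.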
# Get Temperature for 100 and 375m resolution
Temp_TOA_100 = SEBAL.Get_Thermal(L_lambda_b10_100,Rp,Temp_inst,tau_sky,tir_emis,k1,k2)
# Conditions for surface temperature (100m)
n120_surface_temp=Temp_TOA_100.clip(250, 450)
# Save the surface temperature of the VIIRS in 100m resolution
temp_surface_100_fileName_beforeTS = os.path.join(Surface_Temperature_outfolder,'Surface_Temperature_VIIRS_%s.tif' %Var_name)
SEBAL.save_GeoTiff_proy(dest, n120_surface_temp, temp_surface_100_fileName_beforeTS, shape, nband=1)
###################################################################################################################
################################################### HANTS part 4 ##################################################
###################################################################################################################
# Select the PROBA-V files that need to be used (sometimes a composite product is used)
PROBA_V_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
if str(v['PROBA_V_name']) != 'None':
PROBA_V_Dict.setdefault(v['PROBA_V_name'], []).append(k)
Amount_Unique_PROBA_V_images = len(PROBA_V_Dict.keys())
Back_names = []
# Define HANTS PROBA-V variables
VARS = ["NDVI", "Albedo"]
for VAR in VARS:
output_folder_preprocessing_VAR = os.path.join(output_folder_PreSEBAL_SEBAL, VAR)
os.chdir(output_folder_preprocessing_VAR)
for PROBA_V_image in PROBA_V_Dict.keys():
Band_PROBAVhdf_fileName = os.path.join(input_folder_SEBAL, '%s.HDF5' % (PROBA_V_image))
g=gdal.Open(Band_PROBAVhdf_fileName, gdal.GA_ReadOnly)
Meta_data = g.GetMetadata()
Date_PROBAV = str(Meta_data['LEVEL3_RADIOMETRY_BLUE_OBSERVATION_START_DATE'])
year = int(Date_PROBAV.split("-")[0])
month = int(Date_PROBAV.split("-")[1])
day = int(Date_PROBAV.split("-")[2])
Back_name = '%s_PROBAV_%d%02d%02d.tif' %(VAR, year, month, day)
# Create HANTS input NDVI
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
if not os.path.exists(input_folder_HANTS_VAR):
os.mkdir(input_folder_HANTS_VAR)
shutil.copy(os.path.join(output_folder_preprocessing_VAR,Back_name),os.path.join(input_folder_HANTS_VAR,Back_name))
# VIIRS parameter copy
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
if str(v['VIIRS_name']) != 'None':
VIIRS_Dict.setdefault(v['VIIRS_name'], []).append(k)
THERM = 'Surface_Temperature'
output_folder_preprocessing_THERM = os.path.join(output_folder_PreSEBAL_SEBAL, THERM)
for VIIRS_image in VIIRS_Dict.keys():
try:
Date_VIIRS = (VIIRS_image.split("d")[1])
year = int(Date_VIIRS.split("-")[0][0:4])
month = int(Date_VIIRS.split("-")[0][4:6])
day = int(Date_VIIRS.split("-")[0][6:8])
except:
Date_VIIRS = (VIIRS_image.split("_")[3])
year = int(Date_VIIRS.split("-")[0][0:4])
month = int(Date_VIIRS.split("-")[0][4:6])
day = int(Date_VIIRS.split("-")[0][6:8])
Back_name_TB = '%s_VIIRS_%d%02d%02d.tif' %(THERM, year, month, day)
# Create HANTS input NDVI
input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
if not os.path.exists(input_folder_HANTS_THERM):
os.mkdir(input_folder_HANTS_THERM)
shutil.copy(os.path.join(output_folder_preprocessing_THERM,Back_name_TB),os.path.join(input_folder_HANTS_THERM,Back_name_TB))
############################################ Solve shift in PROBA-V ##############################################
VAR = 'Albedo'
os.chdir(os.path.join(temp_folder_PreSEBAL, VAR))
re = glob.glob('%s*.tif' %(VAR))
i = 0
while i < int(len(re)-1):
filename1 = re[0] # maybe change this later so that index 0 is only used when its number of valid pixels is less than 40% of the total
filename2 = re[i + 1]
dest1 = gdal.Open(filename1)
dest2 = gdal.Open(filename2)
Array1 = dest1.GetRasterBand(1).ReadAsArray().flatten()
Array2 = dest2.GetRasterBand(1).ReadAsArray().flatten()
Array3 = dest1.GetRasterBand(1).ReadAsArray()[1:,:].flatten()
Array4 = dest2.GetRasterBand(1).ReadAsArray()[:-1,:].flatten()
Array1_flat = Array1[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
Array2_flat = Array2[np.logical_and(~np.isnan(Array1),~np.isnan(Array2))]
Array3_flat = Array3[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
Array4_flat = Array4[np.logical_and(~np.isnan(Array3),~np.isnan(Array4))]
Corr = np.corrcoef(Array1_flat,Array2_flat)[0,1]
Corr2 = np.corrcoef(Array3_flat,Array4_flat)[0,1]
if Corr2 > Corr:
x,y = dest1.GetRasterBand(1).ReadAsArray().shape
for VAR_check in VARS:
os.chdir(os.path.join(temp_folder_PreSEBAL, VAR_check))
endname = filename2.split('_')[-1]
re_vars = glob.glob('%s*_%s' %(VAR_check,endname))
filename3 = re_vars[0]
dest3 = gdal.Open(filename3)
New_Array = np.ones(dest1.GetRasterBand(1).ReadAsArray().shape) * np.nan
New_Array[1:,:] = dest3.GetRasterBand(1).ReadAsArray()[:-1,:]
filename_out = os.path.join(temp_folder_PreSEBAL, VAR_check, filename3)
SEBAL.save_GeoTiff_proy(dest3, New_Array, filename_out, [int(y),int(x)], nband=1)
i += 1
################################################### General HANTS ###############################################
# Open one image
PROBA_V_IMAGE = os.path.join(input_folder_HANTS_VAR,Back_name)
destPROBAV = gdal.Open(PROBA_V_IMAGE)
VIIRS_IMAGE = os.path.join(input_folder_HANTS_THERM,Back_name_TB)
destVIIRS = gdal.Open(VIIRS_IMAGE)
# Get Geotransform
Geo_PROBAV = destPROBAV.GetGeoTransform()
x_size_PROBAV = destPROBAV.RasterXSize
y_size_PROBAV = destPROBAV.RasterYSize
Geo_VIIRS = destVIIRS.GetGeoTransform()
x_size_VIIRS = destVIIRS.RasterXSize
y_size_VIIRS = destVIIRS.RasterYSize
# Get projection
proj = Get_epsg(destPROBAV)
projVIIRS = Get_epsg(destVIIRS)
# Data parameters
latlim = [Geo_PROBAV[3] + y_size_PROBAV * Geo_PROBAV[5],Geo_PROBAV[3]]
lonlim = [Geo_PROBAV[0], Geo_PROBAV[0] + x_size_PROBAV * Geo_PROBAV[1]]
cellsize = Geo_PROBAV[1]
latlimVIIRS = [Geo_VIIRS [3] + y_size_VIIRS * Geo_VIIRS [5],Geo_VIIRS [3]]
lonlimVIIRS = [Geo_VIIRS [0], Geo_VIIRS [0] + x_size_VIIRS * Geo_VIIRS [1]]
cellsizeVIIRS = Geo_VIIRS [1]
# Get the HANTS parameters
ws_para = wb_veg['HANTS_Input']
# amount of images
Dates = pd.date_range(start_date, end_date, freq = 'D')
###################################################### HANTS Thermal ###############################################
# Define parameters for the surface temperature
THERM = 'Surface_Temperature'
# Define paths for the surface temperature
input_folder_HANTS_THERM = os.path.join(temp_folder_PreSEBAL, THERM)
name_format = '%s_VIIRS_{0}.tif' %THERM
nc_path_TB = os.path.join(input_folder_HANTS_THERM,'%s_NC.nc' %THERM)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for the surface temperature
nb = int(len(Dates))
Dates = pd.date_range(start_date, end_date, freq = 'D')
nf = int(ws_para['D2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['D3'].value) # valid range minimum
high = float(ws_para['D4'].value) # valid range maximum
HiLo = str(ws_para['D5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['D6'].value) # fit error tolerance (points deviating more than fet from the curve fit are rejected)
delta = float(ws_para['D7'].value) # small positive number, e.g. 0.1, to suppress high amplitudes
dod = float(ws_para['D8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
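# In HANTS the fitted curve uses 2*nf + 1 coefficients (the mean plus a sine and
# cosine term per frequency), so with, say, nf = 3 at least 7 valid points per
# pixel are needed plus the dod safety margin; points outside [low, high] or
# further than fet from the fitted curve are rejected as outliers.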
from SEBAL.hants import wa_gdal
# Run
wa_gdal.run_HANTS(input_folder_HANTS_THERM, name_format,
start_date, end_date, latlimVIIRS, lonlimVIIRS, cellsizeVIIRS, nc_path_TB,
nb, nf, HiLo, low, high, fet, dod, delta,
projVIIRS, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS NDVI ###############################################
# Define parameters for the NDVI
VAR = 'NDVI'
# Define paths for NDVI
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_ndvi = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for NDVI # Dates = pd.date_range(start_date, end_date, freq = '5D')
nb = int(len(Dates)) # nr of images
nf = int(ws_para['C2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['C3'].value) # valid range minimum
high = float(ws_para['C4'].value) # valid range maximum
HiLo = str(ws_para['C5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['C6'].value) # fit error tolerance (points deviating more than fet from the curve fit are rejected)
delta = float(ws_para['C7'].value) # small positive number, e.g. 0.1, to suppress high amplitudes
dod = float(ws_para['C8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from SEBAL.hants import wa_gdal
# Run
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path_ndvi,
nb, nf, HiLo, low, high, fet, dod, delta,
proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################### HANTS Albedo ##############################################
# Define parameters for the albedo
VAR = 'Albedo'
# Define paths for Albedo
input_folder_HANTS_VAR = os.path.join(temp_folder_PreSEBAL, VAR)
name_format = '%s_PROBAV_{0}.tif' %VAR
nc_path_albedo = os.path.join(input_folder_HANTS_VAR,'%s_NC.nc' %VAR)
# Create Output folder
rasters_path_out = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
if not os.path.exists(rasters_path_out):
os.mkdir(rasters_path_out)
# HANTS parameters for Albedo
Dates = pd.date_range(start_date, end_date, freq = 'D')
nb = int(len(Dates)) # nr of images
nf = int(ws_para['B2'].value) # number of frequencies to be considered above the zero frequency
low = float(ws_para['B3'].value) # valid range minimum
high = float(ws_para['B4'].value) # valid range maximum
HiLo = str(ws_para['B5'].value) # 2-character string indicating rejection of high or low outliers
fet = float(ws_para['B6'].value) # fit error tolerance (points deviating more than fet from the curve fit are rejected)
delta = float(ws_para['B7'].value) # small positive number, e.g. 0.1, to suppress high amplitudes
dod = float(ws_para['B8'].value) # degree of overdeterminedness (iteration stops if number of points reaches the minimum required for curve fitting, plus dod). This is a safety measure
from SEBAL.hants import wa_gdal
# Run
wa_gdal.run_HANTS(input_folder_HANTS_VAR, name_format,
start_date, end_date, latlim, lonlim, cellsize, nc_path_albedo,
nb, nf, HiLo, low, high, fet, dod, delta,
proj, -9999.0, rasters_path_out, export_hants_only=True)
###################################################################################################################
################################################### post HANTS part 5 #############################################
###################################################################################################################
############################################# Create Outlier maps for PROBA-V #######################################
# Create output folder if not exists
output_folder_HANTS_outliers_PROBAV = os.path.join(temp_folder_PreSEBAL, 'Outliers_PROBAV')
if not os.path.exists(output_folder_HANTS_outliers_PROBAV):
os.mkdir(output_folder_HANTS_outliers_PROBAV)
fh = Dataset(nc_path_albedo, mode='r')
Var = fh.variables.keys()[-1]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR, Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(time)[0])):
time_now = time[i]
data = fh.variables['outliers'][:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers_PROBAV, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create ALBEDO and NDVI #########################################
# Create the end albedo and NDVI files date by date
for date in Dates:
# Define date
year = date.year
month = date.month
day = date.day
# input filenames needed for creating the end albedo and NDVI files
filename_outliers = os.path.join(output_folder_HANTS_outliers_PROBAV,"Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day))
VAR = 'Albedo'
input_folder_PreSEBAL_ALBEDO = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
filename_Albedo_original = os.path.join(Albedo_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
filename_Albedo_HANTS = os.path.join(input_folder_PreSEBAL_ALBEDO, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
VAR = 'NDVI'
input_folder_PreSEBAL_NDVI = os.path.join(temp_folder_PreSEBAL, VAR + "_HANTS")
filename_NDVI_original = os.path.join(NDVI_outfolder, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
filename_NDVI_HANTS = os.path.join(input_folder_PreSEBAL_NDVI, "%s_PROBAV_%d%02d%02d.tif" %(VAR,year,month,day))
# Open the input filenames
dest_outliers = gdal.Open(filename_outliers)
dest_PROBAV_ALBEDO = gdal.Open(filename_Albedo_original)
dest_PROBAV_NDVI = gdal.Open(filename_NDVI_original)
dest_HANTS_ALBEDO = gdal.Open(filename_Albedo_HANTS)
dest_HANTS_NDVI = gdal.Open(filename_NDVI_HANTS)
# If the original exists, it will be the basis for the end albedo and NDVI maps
if not dest_PROBAV_ALBEDO == None:
# Open arrays of the input files
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_ALBEDO_original = dest_PROBAV_ALBEDO.GetRasterBand(1).ReadAsArray()
Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()[:,:]
Array_NDVI_original = dest_PROBAV_NDVI.GetRasterBand(1).ReadAsArray()
Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()[:,:]
# Create outlier Mask
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
# Create a buffer zone around the bad pixels
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
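# Net effect of the swaps above: Array_outliers_mask ends up as 1 for pixels
# that HANTS kept (good observations) and 0 for flagged outliers plus the
# buffer that Create_Buffer grows around them, so summing the mask counts the
# usable pixels.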
# If there are more than 300 Good pixels
if np.nansum(Array_outliers_mask) > 300:
# Use the mask to find the good original pixels and HANTS pixels
Array_ALBEDO_original_mask_nan = Array_ALBEDO_original * Array_outliers_mask
Array_ALBEDO_HANTS_mask_nan = Array_ALBEDO_HANTS * Array_outliers_mask
Array_NDVI_original_mask_nan = Array_NDVI_original * Array_outliers_mask
Array_NDVI_HANTS_mask_nan = Array_NDVI_HANTS * Array_outliers_mask
# Create a 1D array of those pixels
Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan.flatten()
Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan.flatten()
Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan.flatten()
Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan.flatten()
# Remove pixels with high and low values
Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten<-0.2] = np.nan
Array_ALBEDO_HANTS_mask_nan_flatten[Array_ALBEDO_HANTS_mask_nan_flatten>0.6] = np.nan
Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten<-0.2] = np.nan
Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten>0.6] = np.nan
Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten<-0.2] = np.nan
Array_NDVI_HANTS_mask_nan_flatten[Array_NDVI_HANTS_mask_nan_flatten>0.6] = np.nan
Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten<-0.2] = np.nan
Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten>0.6] = np.nan
# Remove the nan values (if there is a nan in one of the arrays remove also the same value in the other array)
Array_ALBEDO_original_mask_nan_flatten2 = Array_ALBEDO_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))]
Array_ALBEDO_HANTS_mask_nan_flatten2 = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_ALBEDO_original_mask_nan_flatten),~np.isnan(Array_ALBEDO_HANTS_mask_nan_flatten))]
Array_NDVI_original_mask_nan_flatten2 = Array_NDVI_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_original_mask_nan_flatten),~np.isnan(Array_NDVI_HANTS_mask_nan_flatten))]
Array_NDVI_HANTS_mask_nan_flatten2 = Array_NDVI_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_NDVI_HANTS_mask_nan_flatten),~np.isnan(Array_NDVI_original_mask_nan_flatten))]
Array_ALBEDO_original_mask_nan_flatten = Array_ALBEDO_original_mask_nan_flatten2
Array_ALBEDO_HANTS_mask_nan_flatten = Array_ALBEDO_HANTS_mask_nan_flatten2
Array_NDVI_original_mask_nan_flatten = Array_NDVI_original_mask_nan_flatten2
Array_NDVI_HANTS_mask_nan_flatten = Array_NDVI_HANTS_mask_nan_flatten2
# Remove all zero values
Array_ALBEDO_original_mask_nan_flatten_without_zero =Array_ALBEDO_original_mask_nan_flatten[Array_ALBEDO_original_mask_nan_flatten != 0.0]
Array_NDVI_original_mask_nan_flatten_without_zero =Array_NDVI_original_mask_nan_flatten[Array_NDVI_original_mask_nan_flatten != 0.0]
# Calculate the 40th and 90th percentiles of the good pixels in the original arrays
Array_ALBEDO_original_mask_value_cold = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,40)
Array_ALBEDO_original_mask_value_hot = np.nanpercentile(Array_ALBEDO_original_mask_nan_flatten_without_zero,90)
Array_NDVI_original_mask_value_cold = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,40)
Array_NDVI_original_mask_value_hot = np.nanpercentile(Array_NDVI_original_mask_nan_flatten_without_zero,90)
# Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas)
Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest = Array_ALBEDO_HANTS_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
Array_ALBEDO_original_mask_nan_flatten_exc_coldest = Array_ALBEDO_original_mask_nan_flatten[np.logical_and(Array_ALBEDO_original_mask_nan_flatten > Array_ALBEDO_original_mask_value_cold,Array_ALBEDO_original_mask_nan_flatten < Array_ALBEDO_original_mask_value_hot)]
Array_NDVI_HANTS_mask_nan_flatten_exc_coldest = Array_NDVI_HANTS_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
Array_NDVI_original_mask_nan_flatten_exc_coldest = Array_NDVI_original_mask_nan_flatten[np.logical_and(Array_NDVI_original_mask_nan_flatten > Array_NDVI_original_mask_value_cold,Array_NDVI_original_mask_nan_flatten < Array_NDVI_original_mask_value_hot)]
#Calculate the mean of those arrays
Ave_ALBEDO_HANTS = np.nanmean(Array_ALBEDO_HANTS_mask_nan_flatten_exc_coldest)
Ave_ALBEDO_original = np.nanmean(Array_ALBEDO_original_mask_nan_flatten_exc_coldest)
Ave_NDVI_HANTS = np.nanmean(Array_NDVI_HANTS_mask_nan_flatten_exc_coldest)
Ave_NDVI_original = np.nanmean(Array_NDVI_original_mask_nan_flatten_exc_coldest)
# Calculate the correction factor for the simulated image
Factor_Albedo = Ave_ALBEDO_original/Ave_ALBEDO_HANTS
Factor_NDVI = Ave_NDVI_original/Ave_NDVI_HANTS
# Apply this factor over the simulated HANTS image
Array_ALBEDO_HANTS_Corrected = Array_ALBEDO_HANTS * Factor_Albedo
Array_NDVI_HANTS_Corrected = Array_NDVI_HANTS * Factor_NDVI
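# Hypothetical example of this bias correction: if the good original pixels
# average 0.230 while the HANTS reconstruction averages 0.250 over the same
# 40th-90th percentile range, Factor_Albedo = 0.92 and the whole HANTS image
# is scaled down by 8 % before it is used to fill the gaps.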
# Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values
End_array_Albedo = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array_Albedo[Array_outliers_mask==0] =Array_ALBEDO_HANTS_Corrected[Array_outliers_mask==0]
End_array_Albedo[Array_outliers_mask==1] =Array_ALBEDO_original[Array_outliers_mask==1]
End_array_NDVI = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array_NDVI[Array_outliers_mask==0] =Array_NDVI_HANTS_Corrected[Array_outliers_mask==0]
End_array_NDVI[Array_outliers_mask==1] =Array_NDVI_original[Array_outliers_mask==1]
# If the original image is too bad, then replace the whole image by the simulated HANTS image
else:
End_array_Albedo = Array_ALBEDO_HANTS
End_array_NDVI = Array_NDVI_HANTS
# Get the geolocation information of the image
geo = dest_PROBAV_ALBEDO.GetGeoTransform()
proj = dest_outliers.GetProjection()
# If there is no original image, use the simulated HANTS image
else:
Array_ALBEDO_HANTS = dest_HANTS_ALBEDO.GetRasterBand(1).ReadAsArray()
End_array_Albedo = Array_ALBEDO_HANTS
Array_NDVI_HANTS = dest_HANTS_NDVI.GetRasterBand(1).ReadAsArray()
End_array_NDVI = Array_NDVI_HANTS
dest_test = None
i = 0
while dest_test == None:
# Get the date of the first image that exists to get the geolocation information
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_ALBEDO_original2 = os.path.join(input_folder_PreSEBAL_ALBEDO, "Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2))
dest_test = gdal.Open(filename_ALBEDO_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
# Save the end array
output_name_end_ALBEDO = os.path.join(ALBEDO_outfolder_end, "Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array_Albedo, output_name_end_ALBEDO, shape, nband=1)
output_name_end_NDVI = os.path.join(NDVI_outfolder_end, "NDVI_PROBAV_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array_NDVI, output_name_end_NDVI, shape, nband=1)
############################################# Create Outlier maps for VIIRS #########################################
# Create output folder if not exists
output_folder_HANTS_outliers_VIIRS = os.path.join(temp_folder_PreSEBAL, 'Outliers_VIIRS')
if not os.path.exists(output_folder_HANTS_outliers_VIIRS):
os.mkdir(output_folder_HANTS_outliers_VIIRS)
fh = Dataset(nc_path_TB, mode='r')
Var = fh.variables.keys()[-1]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_THERM,Back_name_TB)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(time)[0])):
time_now = time[i]
data = fh.variables['outliers'][:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers_VIIRS, 'Outliers_VIIRS_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data, name_out, shape, nband=1)
############################################# Create end thermal #########################################
# Create the end thermal files date by date
for date in Dates:
# Define date
year = date.year
month = date.month
day = date.day
# input filenames needed for creating end thermal file
filename_outliers = os.path.join(output_folder_HANTS_outliers_VIIRS,"Outliers_VIIRS_%d%02d%02d.tif" %(year,month,day))
filename_VIIRS_original = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
filename_VIIRS_HANTS = os.path.join(temp_folder_PreSEBAL, THERM + "_HANTS", "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year,month,day))
# Open the input filenames
dest_outliers = gdal.Open(filename_outliers)
dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
# If the original exists, it will be the basis for the end thermal map
if not dest_VIIRS_original == None:
# Open arrays of the input files
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
# Create outlier Mask
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
# Create a buffer zone around the bad pixels
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
# If there are more than 300 Good pixels
if np.nansum(Array_outliers_mask) > 300:
# Use the mask to find the good original pixels and HANTS pixels
Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
# Create a 1D array of those pixels
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
# Remove pixels with high and low values
Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten<250] = np.nan
Array_VIIRS_HANTS_mask_nan_flatten[Array_VIIRS_HANTS_mask_nan_flatten>350] = np.nan
Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten<250] = np.nan
Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>350] = np.nan
# Remove the nan values (if there is a nan in one of the arrays remove also the same value in the other array)
Array_VIIRS_original_mask_no_nan_flatten = Array_VIIRS_original_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))]
Array_VIIRS_HANTS_mask_no_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_or(~np.isnan(Array_VIIRS_original_mask_nan_flatten),~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten))]
# Remove all zero values
Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_no_nan_flatten[Array_VIIRS_original_mask_no_nan_flatten>0]
# Calculate the 40th and 90th percentiles of the good pixels in the original array
Array_VIIRS_original_mask_value_cold = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
Array_VIIRS_original_mask_value_hot = np.nanpercentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
# Delete the colder and hotter pixel values in both 1D arrays (this is to exclude large areas of seas)
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_no_nan_flatten[np.logical_and(Array_VIIRS_original_mask_no_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_no_nan_flatten < Array_VIIRS_original_mask_value_hot)]
#Calculate the mean of those arrays
Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
# Calculate the correction factor for the simulated image
Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
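# Same idea as for albedo/NDVI: hypothetical means of 301 K (original) and
# 298 K (HANTS) give Factor ~= 1.01, nudging the reconstructed LST towards the
# observed radiometry before the gaps are filled.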
# Apply this factor over the simulated HANTS image
Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
# Create the end array by replacing the bad pixels of the original array by the corrected simulated HANTS values
End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
# If the original image is too bad, then replace the whole image by the simulated HANTS image
else:
End_array = Array_VIIRS_HANTS
# Get the geolocation information of the image
geo = dest_VIIRS_original.GetGeoTransform()
proj = dest_outliers.GetProjection()
# If there is no original image, use the simulated HANTS image
else:
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
End_array = Array_VIIRS_HANTS
dest_test = None
i = 0
while dest_test == None:
# Get the date of the first image that exists to get the geolocation information
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_VIIRS_original2 = os.path.join(input_folder_HANTS_THERM, "Surface_Temperature_VIIRS_%d%02d%02d.tif" %(year2,month2,day2))
dest_test = gdal.Open(filename_VIIRS_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
# Save the end array
output_name_end_LST = os.path.join(temp_folder_PreSEBAL_LST, "VIIRS_LST_%d%02d%02d.tif"%(year,month,day))
SEBAL.save_GeoTiff_proy(dest, End_array, output_name_end_LST, shape, nband=1)
###################################################################################################################
###################################################### preSEBAL continue ##########################################
###################################################################################################################
############################################### Apply thermal sharpening ##########################################
print('---------------------------------------------------------')
print('-------------------- Downscale VIIRS --------------------')
print('---------------------------------------------------------')
# Upscale VIIRS and PROBA-V to 400m
pixel_spacing_upscale = 400
# Open the General_Input sheet
ws = wb['General_Input']
# Extract the DEM filename from the excel file
DEM_fileName = str(ws['E2'].value)
ws = wb['VIIRS_PROBAV_Input']
UTM_Zone = int(str(ws['G2'].value))
# Reproject from Geog Coord Syst to UTM -
# 1) DEM - the original DEM coordinates are geographic: lat, lon
proyDEM_fileName_100 = os.path.join(temp_folder_PreSEBAL,'DEM_100.tif')
dest, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing = 100, UTM_Zone=UTM_Zone)
band = dest.GetRasterBand(1) # Get the reprojected dem band
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
DEM = band.ReadAsArray()
# Save DEM file with the 100 meter resolution
SEBAL.save_GeoTiff_proy(dest, DEM, proyDEM_fileName_100, shape, nband=1)
# Create upscaled DEM
proyDEM_fileName_400 = os.path.join(temp_folder_PreSEBAL,'DEM_400.tif')
dest_400, ulx_dem_400, lry_dem_400, lrx_dem_400, uly_dem_400, epsg_to = SEBAL.reproject_dataset(
DEM_fileName, pixel_spacing_upscale, UTM_Zone = UTM_Zone)
# find spatial parameters array
DEM_400 = dest_400.GetRasterBand(1).ReadAsArray()
Y_raster_size_400 = dest_400.RasterYSize
X_raster_size_400 = dest_400.RasterXSize
shape_400=([X_raster_size_400, Y_raster_size_400])
# Save DEM file with the 400 meter resolution
SEBAL.save_GeoTiff_proy(dest_400, DEM_400, proyDEM_fileName_400, shape_400, nband=1)
for date in Dates:
surf_temp_fileName = os.path.join(temp_folder_PreSEBAL, 'Surf_temp_After_TS_%d%02d%02d.tif' %(date.year, date.month, date.day))
temp_surface_100_fileName_beforeTS = os.path.join(temp_folder_PreSEBAL_LST,'VIIRS_LST_%d%02d%02d.tif' %(date.year, date.month, date.day))
################################ Thermal Sharpening #####################################################
# Define filename
file_NDVI_after_HANTS = os.path.join(NDVI_outfolder_end, 'NDVI_PROBAV_%d%02d%02d.tif' %(date.year, date.month, date.day))
# Open the NDVI and LST datasets
dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
dest_LST = gdal.Open(temp_surface_100_fileName_beforeTS)
# Open NDVI array
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
# Open LST array
LST = dest_LST.GetRasterBand(1).ReadAsArray()
# Upscale thermal band VIIRS from 100m to 400m
VIIRS_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
temp_surface_100_fileName_beforeTS, proyDEM_fileName_400)
data_Temp_Surf_400 = VIIRS_Upscale.GetRasterBand(1).ReadAsArray()
# Upscale PROBA-V NDVI from 100m to 400m
NDVI_PROBAV_Upscale, ulx_dem, lry_dem, lrx_dem, uly_dem, epsg_to = SEBAL.reproject_dataset_example(
file_NDVI_after_HANTS, proyDEM_fileName_400)
data_NDVI_400 = NDVI_PROBAV_Upscale.GetRasterBand(1).ReadAsArray()
# Define the width of the moving window box
Box=9
# Apply the surface temperature sharpening
temp_surface_sharpened = SEBAL.Thermal_Sharpening(data_Temp_Surf_400, data_NDVI_400, NDVI, Box, NDVI_PROBAV_Upscale, output_folder, proyDEM_fileName_100, shape, dest, surf_temp_fileName)
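# A minimal description of the step above, assuming the usual NDVI-based
# sharpening in SEBAL.Thermal_Sharpening: the LST-NDVI relation is fitted in a
# Box x Box (here 9 x 9) moving window on the 400 m grids and then applied to
# the 100 m NDVI, so each fit covers roughly a 3.6 km neighbourhood.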
# Create Water mask based on HANTS NDVI output
water_mask = np.zeros((shape[1], shape[0]))
water_mask[NDVI<0.0]=1
# Split the temporal water mask into a snow mask and a water mask by using the surface temperature
Snow_Mask_PROBAV, water_mask, ts_moist_veg_min, NDVI_max, NDVI_std = SEBAL.CalculateSnowWaterMask(NDVI,shape,water_mask,temp_surface_sharpened)
# Replace water values
temp_surface_sharpened[water_mask==1] = LST[water_mask == 1]
temp_surface_sharpened = np.where(np.isnan(temp_surface_sharpened), LST, temp_surface_sharpened)
surf_temp_fileName = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%d%02d%02d.tif' %(date.year, date.month, date.day))
SEBAL.save_GeoTiff_proy(dest, temp_surface_sharpened, surf_temp_fileName, shape, nband=1)
################################################## Calculate LAI ##################################################
# Open the NDVI dataset
dest_NDVI = gdal.Open(file_NDVI_after_HANTS)
# Open NDVI array
NDVI = dest_NDVI.GetRasterBand(1).ReadAsArray()
LAI_FileName = os.path.join(LAI_outfolder,'LAI_%d%02d%02d.tif' %(date.year, date.month, date.day))
# Calculate LAI
FPAR, tir_emis, Nitrogen, vegt_cover, LAI, b10_emissivity = SEBAL.Calc_vegt_para(NDVI,water_mask, shape)
SEBAL.save_GeoTiff_proy(dest, LAI, LAI_FileName, shape, nband=1)
################################ Calculate the Vegetation height ########################
# Open the Vegetation_Height sheet of the preprocessing excel file
ws_veg = wb_veg['Vegetation_Height']
# Define output name for the LandUse map
dst_FileName = os.path.join(output_folder,'LU.tif')
# Open LU data
LU_dest = gdal.Open(LU_data_FileName)
LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
# Reproject the LAI to the same projection as LU
dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
# Read out the excel file coefficient numbers
Array = np.zeros([ws_veg.max_row-1,4])
for j in ['A','C','D','E']:
j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
for i in range(2,ws_veg.max_row+1):
Value = (ws_veg['%s%s' %(j,i)].value)
Array[i-2, j_number[j]] = Value
# Create maps with the coefficient numbers for the right land cover
coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
for coeff_nmbr in range(0,3):
for Class in range(0,len(Array)):
coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
# Get some dimensions of the projected dataset
band_data = dest1.GetRasterBand(1)
ncol_data = dest1.RasterXSize
nrow_data = dest1.RasterYSize
shape_data=[ncol_data, nrow_data]
# Calculate the vegetation height in the LU projection
Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
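# The vegetation height follows a per-land-use quadratic in LAI,
# h = a*LAI^2 + b*LAI + c, with a, b and c read from columns C, D and E of the
# Vegetation_Height sheet. Hypothetical example: a = 0.1, b = 0.5, c = 0.2 and
# LAI = 3 give h = 0.9 + 1.5 + 0.2 = 2.6 m, clipped to the 0-600 range.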
# Save the vegetation height in the LU projection in the temporary directory
Veg_Height_proj_FileName = os.path.join(temp_folder_PreSEBAL,'Veg_Height_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
# Reproject the Veg_height to the LAI projection
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
# Get some dimensions of the original dataset
band_data = dest.GetRasterBand(1)
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
# Open the Veg_height with the same projection as LAI
Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
Veg_Height[Veg_Height == 0] = 0.4
# Save Vegetation Height in the end folder
dst_FileName = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%d%02d%02d.tif' %(date.year, date.month, date.day))
SEBAL.save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate Water Mask #########################
# Open all the water masks
os.chdir(WaterMask_outfolder)
re_water_mask = glob.glob('Water_Mask*.tif')
# Loop over all the files
for water_mask_filename in re_water_mask:
# Create the filepath to the water mask
water_mask_filepath = os.path.join(WaterMask_outfolder,water_mask_filename)
# Open Array
water_mask_dest = gdal.Open(water_mask_filepath)
# If the summed water mask array does not exist yet, create it
if not 'water_mask_array' in locals():
water_mask_array = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
# Add all the water masks
water_mask_array += water_mask_dest.GetRasterBand(1).ReadAsArray()
# Derive the final water mask: a pixel is water if more than 50 percent of the masks classify it as water
water_mask_array_per = water_mask_array/len(re_water_mask)
water_mask_array_end = np.zeros([water_mask_dest.RasterYSize, water_mask_dest.RasterXSize])
water_mask_array_end[water_mask_array_per > 0.5] = 1
# Save water mask
WaterMask_outfolder_end_FileName = os.path.join(WaterMask_outfolder_end,'Water_Mask.tif')
SEBAL.save_GeoTiff_proy(dest, water_mask_array_end, WaterMask_outfolder_end_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_p.max_row+1):
Value = (ws_p['%s%s' %(j,i)].value)
Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = 0.5
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'p-factor_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = 0.5
dst_pfactor_FileName = os.path.join(output_folder_p_factor,'p_factor.tif')
SEBAL.save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_c.max_row+1):
Value = (ws_c['%s%s' %(j,i)].value)
Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = 2.5
dst_FileName = os.path.join(temp_folder_PreSEBAL, 'LUE_max_proj.tif')
SEBAL.save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = 2.5
dst_LUEmax_FileName = os.path.join(output_folder_LUE,'LUE_max.tif')
SEBAL.save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
####################################################################################################################
################################################ Write output part 6 ###############################################
####################################################################################################################
############################################# Fill in the additional input sheet #########################################
# things to be filled in:
# Transmissivity (optional)
# NDVI (additional input)
# Albedo (additional input)
# LST (additional input)
# Water Mask (additional input)
# p-factor (soil input)
# c-factor (soil input)
# Vegetation height (meteo input)
# VIIRS parameter copy
VIIRS_Dict = {}
for k, v in SEBAL_RUNS.iteritems():
VIIRS_Dict.setdefault(v['output_folder'], []).append(k)
'''
LST folder = output_folder_HANTS_end
NDVI folder = os.path.join(output_folder_HANTS, 'NDVI')
ALBEDO folder = os.path.join(output_folder_HANTS, 'Albedo')
SAVI folder = os.path.join(output_folder_HANTS, 'SAVI')
'''
VARS = ["NDVI", "Albedo"]
Letter_dict = {"NDVI":'B', "Albedo":'D'}
xfile = load_workbook(inputExcel)
sheet_additional = xfile.get_sheet_by_name('Additional_Input')
sheet_meteo = xfile.get_sheet_by_name('Meteo_Input')
sheet_soil = xfile.get_sheet_by_name('Soil_Input')
sheet_out_name = ''.join([os.path.splitext(os.path.basename(inputExcel))[0],'_SEBAL.xlsx'])
sheet_out_dir = os.path.dirname(inputExcel)
sheet_out_file_name = os.path.join(sheet_out_dir, sheet_out_name)
for output_name_run in VIIRS_Dict.keys():
# Get General parameters
Row_number = VIIRS_Dict[output_name_run][0]
Type_of_Run = SEBAL_RUNS.items()
VIIRS_date = output_name_run.split('_')[-1]
VIIRS_datetime= datetime.strptime(VIIRS_date, '%d%m%Y')
date_run = '%d%02d%02d' %(VIIRS_datetime.year,VIIRS_datetime.month,VIIRS_datetime.day)
# import LST
file_name_LST = os.path.join(output_folder_HANTS_end_sharp, 'LST_surface_temp_sharpened_%s.tif' %date_run )
sheet_additional['E%d'%(Row_number)] = str(file_name_LST)
# import NDVI and Albedo and water mask
for VAR_SINGLE in VARS:
Letter = Letter_dict[VAR_SINGLE]
file_name_VAR_single = os.path.join(output_folder_PreSEBAL, VAR_SINGLE, '%s_PROBAV_%s.tif' %(VAR_SINGLE, date_run))
sheet_additional['%s%d'%(Letter, Row_number)] = str(file_name_VAR_single)
# import Water Mask
sheet_additional['C%d'%(Row_number)] = str(WaterMask_outfolder_end_FileName)
# import p-factor
file_name_p_factor = os.path.join(output_folder_p_factor,'p_factor.tif')
sheet_soil['H%d'%(Row_number)] = str(file_name_p_factor)
# import c-factor (LUE max)
file_name_c_factor = os.path.join(output_folder_LUE, 'LUE_max.tif')
sheet_soil['I%d'%(Row_number)] = str(file_name_c_factor)
# import vegetation height
file_name_vegt_height = os.path.join(output_folder_HANTS_end_Veg,'Vegetation_Height_%s.tif' %date_run)
sheet_meteo['O%d'%(Row_number)] = str(file_name_vegt_height)
xfile.save(sheet_out_file_name)
'''
# If instantaneous Transmissivity is calculated in PreSEBAL
if Check_Trans_inst == 1:
sheet['N%d'%(number)] = str(Transmissivity_inst_fileName)
xfile.save(inputExcel)
# If daily Transmissivity is calculated in PreSEBAL
if Check_Trans_24 == 1:
sheet_meteo['K%d'%(number)] = str(Transmissivity_24_fileName)
xfile.save(sheet_out_file_name)
'''
'''
############################################# Create Outlier maps for PROBA-V #########################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_albedo, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
############################################ NDVI ##################################################
# Create output folder if not exists
output_folder_HANTS_outliers = os.path.join(output_folder_HANTS, 'Outliers_NDVI')
if not os.path.exists(output_folder_HANTS_outliers):
os.mkdir(output_folder_HANTS_outliers)
fh = Dataset(nc_path_ndvi, mode='r')
Var = fh.variables.keys()[-1]
data = fh.variables['outliers'][:]
lat = fh.variables[fh.variables.keys()[1]][:]
lon = fh.variables[fh.variables.keys()[2]][:]
time = fh.variables[fh.variables.keys()[3]][:]
minimum_lon = np.min(lon)
maximum_lat = np.max(lat)
diff_lon = lon[1] - lon[0]
diff_lat = lat[1] - lat[0]
if not ('shape' in locals() or 'dest' in locals()):
Example_file = os.path.join(output_folder_preprocessing_VAR,Back_name)
dest = gdal.Open(Example_file)
ncol = dest.RasterXSize # Get the reprojected dem column size
nrow = dest.RasterYSize # Get the reprojected dem row size
shape=[ncol, nrow]
for i in range(0,int(np.shape(data)[2])):
time_now = time[i]
data_now = data[:,:,i]
geo = tuple([minimum_lon, diff_lon, 0, maximum_lat, 0, diff_lat])
name_out = os.path.join(output_folder_HANTS_outliers, 'Outliers_PROBAV_%s.tif' %time_now)
SEBAL.save_GeoTiff_proy(dest, data_now, name_out, shape, nband=1)
###################################################### postHANTS Albedo ###############################################
for date in Dates:
year = date.year
month = date.month
day = date.day
filename_outliers = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Outliers\Outliers_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_original = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year,month,day)
filename_VIIRS_HANTS = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_output\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
dest_outliers = gdal.Open(filename_outliers)
dest_VIIRS_original = gdal.Open(filename_VIIRS_original)
dest_VIIRS_HANTS = gdal.Open(filename_VIIRS_HANTS)
if not dest_VIIRS_original == None:
Array_outliers = dest_outliers.GetRasterBand(1).ReadAsArray()[:,:]
Array_VIIRS_original = dest_VIIRS_original.GetRasterBand(1).ReadAsArray()
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()[:,:]
Array_outliers[Array_outliers==-9999.] = 0
Array_outliers_mask = np.zeros(np.shape(Array_outliers))
Array_outliers_mask[Array_outliers==1.]=0
Array_outliers_mask[Array_outliers==0.]=1
Array_outliers_mask[Array_outliers_mask==0]=2
Array_outliers_mask[Array_outliers_mask==1]=0
Array_outliers_mask[Array_outliers_mask==2]=1
Array_outliers_mask = Create_Buffer(Array_outliers_mask)
Array_outliers_mask[Array_outliers_mask==1] = 2
Array_outliers_mask[Array_outliers_mask==0] = 1
Array_outliers_mask[Array_outliers_mask==2] = 0
if np.nansum(Array_outliers_mask) > 30:
Array_outliers_mask[Array_VIIRS_HANTS == 0] = np.nan
Array_VIIRS_original_mask_nan = Array_VIIRS_original * Array_outliers_mask
Array_VIIRS_HANTS_mask_nan = Array_VIIRS_HANTS * Array_outliers_mask
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan.flatten()
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan.flatten()
Array_VIIRS_original_mask_nan_flatten = Array_VIIRS_original_mask_nan_flatten[~np.isnan(Array_VIIRS_original_mask_nan_flatten)]
Array_VIIRS_HANTS_mask_nan_flatten = Array_VIIRS_HANTS_mask_nan_flatten[~np.isnan(Array_VIIRS_HANTS_mask_nan_flatten)]
Array_VIIRS_original_mask_nan_flatten_without_zero =Array_VIIRS_original_mask_nan_flatten[Array_VIIRS_original_mask_nan_flatten>0]
Array_VIIRS_original_mask_value_cold = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,40)
Array_VIIRS_original_mask_value_hot = np.percentile(Array_VIIRS_original_mask_nan_flatten_without_zero,90)
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest = Array_VIIRS_HANTS_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_original_mask_nan_flatten_exc_coldest = Array_VIIRS_original_mask_nan_flatten[np.logical_and(Array_VIIRS_original_mask_nan_flatten > Array_VIIRS_original_mask_value_cold,Array_VIIRS_original_mask_nan_flatten < Array_VIIRS_original_mask_value_hot)]
Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest[Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest==-9999] = np.nan
Array_VIIRS_original_mask_nan_flatten_exc_coldest[Array_VIIRS_original_mask_nan_flatten_exc_coldest==-9999] = np.nan
Ave_VIIRS_HANTS = np.nanmean(Array_VIIRS_HANTS_mask_nan_flatten_exc_coldest)
Ave_VIIRS_original = np.nanmean(Array_VIIRS_original_mask_nan_flatten_exc_coldest)
Factor = Ave_VIIRS_original/Ave_VIIRS_HANTS
Array_VIIRS_HANTS_Corrected = Array_VIIRS_HANTS * Factor
End_array = np.ones(np.shape(Array_outliers_mask)) * np.nan
End_array[Array_outliers_mask==0] =Array_VIIRS_HANTS_Corrected[Array_outliers_mask==0]
End_array[Array_outliers_mask==1] =Array_VIIRS_original[Array_outliers_mask==1]
else:
End_array = Array_VIIRS_HANTS
geo = dest_VIIRS_original.GetGeoTransform()
proj = dest_outliers.GetProjection()
else:
Array_VIIRS_HANTS = dest_VIIRS_HANTS.GetRasterBand(1).ReadAsArray()
End_array = Array_VIIRS_HANTS
dest_test = None
i = 0
while dest_test == None:
date2 = Dates[i]
year2 = date2.year
month2= date2.month
day2 = date2.day
try:
filename_VIIRS_original2 = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_input\Albedo\Albedo_PROBAV_%d%02d%02d.tif" %(year2,month2,day2)
dest_test = gdal.Open(filename_VIIRS_original2)
geo = dest_test.GetGeoTransform()
proj = dest_test.GetProjection()
except:
i+=1
import wa.General.data_conversions as DC
name = r"G:\SEBAL_Tadla\PROBAV-VIIRS\HANTS_end\Albedo\Albedo_PROBAV_%d%02d%02d.tif"%(year,month,day)
DC.Save_as_tiff(name, End_array, geo, proj)
################################## All input is now calculated, so preprocessing can start ########################
# Open the Vegetation_Height sheet of the preprocessing excel workbook
ws_veg = wb_veg['Vegetation_Height']
# Define output name for the LandUse map
dst_FileName = os.path.join(output_folder,'LU_%s.tif' %Var_name)
# Open LU data
LU_dest = gdal.Open(LU_data_FileName)
LU_data = LU_dest.GetRasterBand(1).ReadAsArray()
# Reproject the LAI to the same projection as LU
dest1, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(LAI_FileName, LU_data_FileName) ## input after HANTS
LAI_proj = dest1.GetRasterBand(1).ReadAsArray()
# Read out the excel file coefficient numbers
Array = np.zeros([ws_veg.max_row-1,4])
for j in ['A','C','D','E']:
j_number={'A' : 0, 'C' : 1, 'D' : 2, 'E' : 3}
for i in range(2,ws_veg.max_row+1):
Value = (ws_veg['%s%s' %(j,i)].value)
Array[i-2, j_number[j]] = Value
# Create maps with the coefficient numbers for the right land cover
coeff = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1]),3])
for coeff_nmbr in range(0,3):
for Class in range(0,len(Array)):
coeff[LU_data==Array[Class,0],coeff_nmbr] = Array[Class,coeff_nmbr+1]
# Get some dimensions of the projected dataset
band_data = dest1.GetRasterBand(1)
ncol_data = dest1.RasterXSize
nrow_data = dest1.RasterYSize
shape_data=[ncol_data, nrow_data]
# Calculate the vegetation height in the LU projection
Veg_Height_proj = coeff[:,:,0] * np.power(LAI_proj,2) + coeff[:,:,1] * LAI_proj + coeff[:,:,2]
Veg_Height_proj = np.clip(Veg_Height_proj, 0, 600)
# Save the vegetation height in the lU projection in the temporary directory
Veg_Height_proj_FileName = os.path.join(output_folder_temp,'Veg_Height_proj.tif')
save_GeoTiff_proy(dest1, Veg_Height_proj, Veg_Height_proj_FileName, shape_data, nband=1)
# Reproject the Veg_height to the LAI projection
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(Veg_Height_proj_FileName, LAI_FileName)
# Get some dimensions of the original dataset
band_data = dest.GetRasterBand(1)
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
# Open the Veg_height with the same projection as LAI
Veg_Height = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
Veg_Height[Veg_Height == 0] = np.nan
# Save Vegetation Height in the end folder
dst_FileName = os.path.join(output_folder,'Vegetation_Height_%s.tif' %Var_name)
save_GeoTiff_proy(dest, Veg_Height, dst_FileName, shape, nband=1)
######################## calculate p-factor by using the Landuse map #########################
ws_p = wb_veg['p-factor']
Array_P = np.zeros([ws_p.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_p.max_row+1):
Value = (ws_p['%s%s' %(j,i)].value)
Array_P[i-2, j_number[j]] = Value
p_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_P)):
p_factor[LU_data==Array_P[Class,0]] = Array_P[Class,1]
p_factor[p_factor == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'p-factor_proj.tif')
save_GeoTiff_proy(dest1, p_factor, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
p_factor = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
p_factor[p_factor == 0] = np.nan
dst_pfactor_FileName = os.path.join(output_folder,'p-factor_%s.tif' %Var_name)
save_GeoTiff_proy(dest, p_factor, dst_pfactor_FileName, shape, nband=1)
######################## calculate c-factor by using the Landuse map #########################
ws_c = wb_veg['C-factor']
Array_C = np.zeros([ws_c.max_row-1,2])
for j in ['A','C']:
j_number={'A' : 0, 'C' : 1}
for i in range(2,ws_c.max_row+1):
Value = (ws_c['%s%s' %(j,i)].value)
Array_C[i-2, j_number[j]] = Value
c_factor = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
for Class in range(0,len(Array_C)):
c_factor[LU_data==Array_C[Class,0]] = Array_C[Class,1]
c_factor[np.logical_and(c_factor != 3.0, c_factor != 4.0)] = np.nan
LUE_max = np.zeros([int(np.shape(LU_data)[0]),int(np.shape(LU_data)[1])])
LUE_max[c_factor == 3] = 2.5
LUE_max[c_factor == 4] = 4.5
LUE_max[LUE_max == 0] = np.nan
dst_FileName = os.path.join(output_folder_temp,'LUE_max_proj.tif')
save_GeoTiff_proy(dest1, LUE_max, dst_FileName, shape_data, nband=1)
dest, ulx, lry, lrx, uly, epsg_to = SEBAL.reproject_dataset_example(dst_FileName, LAI_FileName)
band_data = dest.GetRasterBand(1) # Get the reprojected dem band
ncol_data = dest.RasterXSize
nrow_data = dest.RasterYSize
LUE_max = band_data.ReadAsArray(0, 0, ncol_data, nrow_data)
LUE_max[LUE_max == 0] = np.nan
dst_LUEmax_FileName = os.path.join(output_folder,'LUE_max_%s.tif' %Var_name)
save_GeoTiff_proy(dest, LUE_max, dst_LUEmax_FileName, shape, nband=1)
############################# delete temporary directory ########################
shutil.rmtree(output_folder_temp)
#################################################################################
'''
# Functions
#################################################################################
def Create_Buffer(Data_In):
'''
This function creates a buffered (dilated) version of a binary mask: every pixel within a square moving window of (2 * Buffer_area + 1) pixels around a flagged pixel is set to 1
'''
Buffer_area = 7 # A block of 2 times Buffer_area + 1 will be 1 if there is the pixel in the middle is 1
Data_Out=np.empty((len(Data_In),len(Data_In[1])))
Data_Out[:,:] = Data_In
for ypixel in range(0,Buffer_area + 1):
for xpixel in range(1,Buffer_area + 1):
if ypixel==0:
for xpixel in range(1,Buffer_area + 1):
Data_Out[:,0:-xpixel] += Data_In[:,xpixel:]
Data_Out[:,xpixel:] += Data_In[:,:-xpixel]
for ypixel in range(1,Buffer_area + 1):
Data_Out[ypixel:,:] += Data_In[:-ypixel,:]
Data_Out[0:-ypixel,:] += Data_In[ypixel:,:]
else:
Data_Out[0:-xpixel,ypixel:] += Data_In[xpixel:,:-ypixel]
Data_Out[xpixel:,ypixel:] += Data_In[:-xpixel,:-ypixel]
Data_Out[0:-xpixel,0:-ypixel] += Data_In[xpixel:,ypixel:]
Data_Out[xpixel:,0:-ypixel] += Data_In[:-xpixel,ypixel:]
Data_Out[Data_Out>0.1] = 1
Data_Out[Data_Out<=0.1] = 0
return(Data_Out)
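# Illustrative usage sketch (added for clarity, not part of the original
# workflow): buffer a mask of flagged pixels with the moving window above.
# Assumes numpy is imported as np, as elsewhere in this script.
def _example_create_buffer():
    mask = np.zeros((20, 20))
    mask[10, 10] = 1  # a single flagged pixel
    buffered = Create_Buffer(mask)
    # pixels within roughly Buffer_area cells of (10, 10) are now set to 1
    return buffered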
#------------------------------------------------------------------------------
def Get_epsg(g):
try:
# Get info of the dataset that is used for transforming
gland_proj = g.GetProjection()
Projection=gland_proj.split('EPSG","')
epsg_to=int((str(Projection[-1]).split(']')[0])[0:-1])
except:
epsg_to=4326
print('Was not able to get the projection, so WGS84 is assumed')
return(epsg_to)
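# Illustrative usage sketch (added for clarity): read the EPSG code of a
# raster opened with GDAL; the path below is hypothetical. The function
# falls back to EPSG 4326 when the projection cannot be parsed, as
# implemented above. Assumes gdal is imported as in the rest of this script.
def _example_get_epsg():
    example_dataset = gdal.Open(r'C:\data\example_raster.tif')  # hypothetical path
    return Get_epsg(example_dataset)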
#------------------------------------------------------------------------------
def gap_filling(data,NoDataValue):
"""
This function fills the no data gaps in a numpy array
Keyword arguments:
data -- Array
NoDataValue -- Value that must be filled
"""
# fill the no data values
if NoDataValue is np.nan:
mask = ~(np.isnan(data))
else:
mask = ~(data==NoDataValue)
xx, yy = np.meshgrid(np.arange(data.shape[1]), np.arange(data.shape[0]))
xym = np.vstack( (np.ravel(xx[mask]), np.ravel(yy[mask])) ).T
data0 = np.ravel( data[:,:][mask] )
interp0 = scipy.interpolate.NearestNDInterpolator( xym, data0 )
data_end = interp0(np.ravel(xx), np.ravel(yy)).reshape( xx.shape )
return (data_end)
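# Illustrative usage sketch (added for clarity, not part of the original
# workflow): fill -9999 cells with the value of their nearest valid
# neighbour. Assumes numpy and scipy are imported as in the rest of the script.
def _example_gap_filling():
    demo = np.array([[1.0, -9999.0, 3.0],
                     [4.0, 5.0, -9999.0]])
    return gap_filling(demo, -9999.0)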
#------------------------------------------------------------------------------
if __name__ == '__main__':
main() | apache-2.0 |
xubenben/scikit-learn | sklearn/cross_decomposition/cca_.py | 209 | 3150 | from .pls_ import _PLS
__all__ = ['CCA']
class CCA(_PLS):
"""CCA Canonical Correlation Analysis.
CCA inherits from PLS with mode="B" and deflation_mode="canonical".
Read more in the :ref:`User Guide <cross_decomposition>`.
Parameters
----------
n_components : int, (default 2).
number of components to keep.
scale : boolean, (default True)
Whether to scale the data.
max_iter : an integer, (default 500)
the maximum number of iterations of the NIPALS inner loop
tol : non-negative real, default 1e-06.
the tolerance used in the iterative algorithm
copy : boolean
Whether the deflation should be done on a copy. Leave the default value
set to True unless you don't care about side effects.
Attributes
----------
x_weights_ : array, [p, n_components]
X block weights vectors.
y_weights_ : array, [q, n_components]
Y block weights vectors.
x_loadings_ : array, [p, n_components]
X block loadings vectors.
y_loadings_ : array, [q, n_components]
Y block loadings vectors.
x_scores_ : array, [n_samples, n_components]
X scores.
y_scores_ : array, [n_samples, n_components]
Y scores.
x_rotations_ : array, [p, n_components]
X block to latents rotations.
y_rotations_ : array, [q, n_components]
Y block to latents rotations.
n_iter_ : array-like
Number of iterations of the NIPALS inner loop for each
component.
Notes
-----
For each component k, find the weights u, v that maximize
corr(Xk u, Yk v), such that ``|u| = |v| = 1``
Note that it maximizes only the correlations between the scores.
The residual matrix of X (Xk+1) block is obtained by the deflation on the
current X score: x_score.
The residual matrix of Y (Yk+1) block is obtained by deflation on the
current Y score.
Examples
--------
>>> from sklearn.cross_decomposition import CCA
>>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [3.,5.,4.]]
>>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
>>> cca = CCA(n_components=1)
>>> cca.fit(X, Y)
... # doctest: +ELLIPSIS, +NORMALIZE_WHITESPACE
CCA(copy=True, max_iter=500, n_components=1, scale=True, tol=1e-06)
>>> X_c, Y_c = cca.transform(X, Y)
References
----------
Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with
emphasis on the two-block case. Technical Report 371, Department of
Statistics, University of Washington, Seattle, 2000.
In french but still a reference:
Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris:
Editions Technic.
See also
--------
PLSCanonical
PLSSVD
"""
def __init__(self, n_components=2, scale=True,
max_iter=500, tol=1e-06, copy=True):
_PLS.__init__(self, n_components=n_components, scale=scale,
deflation_mode="canonical", mode="B",
norm_y_weights=True, algorithm="nipals",
max_iter=max_iter, tol=tol, copy=copy)
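# Illustrative usage sketch (added for clarity, not part of the original
# module): besides transform, a fitted CCA model can predict Y from X
# through the latent space. The data reuses the docstring example above.
def _cca_usage_sketch():
    X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]]
    Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]]
    cca = CCA(n_components=1).fit(X, Y)
    X_c, Y_c = cca.transform(X, Y)  # canonical scores of X and Y
    Y_pred = cca.predict(X)         # regression of Y on the X scores
    return X_c, Y_c, Y_pred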
| bsd-3-clause |
hdemeyer/king-phisher | king_phisher/client/tabs/campaign.py | 1 | 27197 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# king_phisher/client/tabs/campaign.py
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of the project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import datetime
import logging
import threading
import time
from king_phisher import find
from king_phisher import ipaddress
from king_phisher import utilities
from king_phisher.client import export
from king_phisher.client import graphs
from king_phisher.client import gui_utilities
from king_phisher.client.widget import extras
from king_phisher.client.widget import managers
from gi.repository import GdkPixbuf
from gi.repository import GLib
from gi.repository import Gtk
from smoke_zephyr.utilities import parse_timespan
UNKNOWN_LOCATION_STRING = 'N/A (Unknown)'
class CampaignViewGenericTab(gui_utilities.GladeGObject):
"""
This object is meant to be subclassed by all of the tabs which load and
display information about the current campaign.
"""
label_text = 'Unknown'
"""The label of the tab for display in the GUI."""
top_gobject = 'box'
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTab, self).__init__(*args, **kwargs)
self.label = Gtk.Label(label=self.label_text)
"""The :py:class:`Gtk.Label` representing this tab with text from :py:attr:`~.CampaignViewGenericTab.label_text`."""
self.is_destroyed = threading.Event()
getattr(self, self.top_gobject).connect('destroy', self.signal_destroy)
self.last_load_time = float('-inf')
"""The last time the data was loaded from the server."""
self.refresh_frequency = parse_timespan(str(self.config.get('gui.refresh_frequency', '5m')))
"""The lifetime in seconds to wait before refreshing the data from the server."""
self.loader_thread = None
"""The thread object which loads the data from the server."""
self.loader_thread_lock = threading.Lock()
"""The :py:class:`threading.Lock` object used for synchronization between the loader and main threads."""
self.loader_thread_stop = threading.Event()
"""The :py:class:`threading.Event` object used to request that the loader thread stop before completion."""
self.application.connect('campaign-set', self.signal_kpc_campaign_set)
def _sync_loader_thread(self):
"""
Synchronize the loader thread by ensuring that it is stopped. If it is
currently running, this will use :py:attr:`~.loader_thread_stop` to
request that the loader stops early.
"""
if not self.loader_thread_is_running:
return
# it's alive so tell it to stop, wait for it, then proceed
self.loader_thread_stop.set()
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.loader_thread.join(1)
@property
def rpc(self):
return self.application.rpc
@property
def loader_thread_is_running(self):
if self.loader_thread is None:
return False
return self.loader_thread.is_alive()
def load_campaign_information(self, force=True):
raise NotImplementedError()
def signal_button_clicked_refresh(self, button):
self.load_campaign_information()
def signal_destroy(self, gobject):
self.is_destroyed.set()
self.loader_thread_stop.set()
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
self.logger.debug("waiting on thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
while self.loader_thread.is_alive():
gui_utilities.gtk_sync()
self.logger.debug("joined thread: {0}.loader_thread (tid: 0x{1:x})".format(self.__class__.__name__, self.loader_thread.ident))
def signal_kpc_campaign_set(self, *_):
self.load_campaign_information()
class CampaignViewGenericTableTab(CampaignViewGenericTab):
"""
This object is meant to be subclassed by tabs which will display
campaign information of different types from specific database
tables. The data in this object is refreshed when multiple events
occur and it uses an internal timer to represent the last time the
data was refreshed.
"""
dependencies = gui_utilities.GladeDependencies(
children=(
'button_refresh',
'treeview_campaign'
)
)
node_query = None
"""
The GraphQL query used to load a particular node from the remote table.
This query is provided with a single parameter of the node's id.
"""
table_name = ''
"""The database table represented by this tab."""
table_query = None
"""
The GraphQL query used to load the desired information from the remote
table. This query is provided with the following three parameters:
campaign, count and cursor.
"""
view_columns = ()
"""The dictionary map of column numbers to column names starting at column 1."""
xlsx_worksheet_options = None
def __init__(self, *args, **kwargs):
super(CampaignViewGenericTableTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
self.treeview_manager = managers.TreeViewManager(
treeview,
selection_mode=Gtk.SelectionMode.MULTIPLE,
cb_delete=self._prompt_to_delete_row,
cb_refresh=self.load_campaign_information
)
self.treeview_manager.set_column_titles(self.view_columns, column_offset=1)
self.popup_menu = self.treeview_manager.get_popup_menu()
"""The :py:class:`Gtk.Menu` object which is displayed when right-clicking in the view area."""
treeview = self.gobjects['treeview_campaign']
store_columns = [str] * (len(self.view_columns) + 1)
store = Gtk.ListStore(*store_columns)
treeview.set_model(store)
self.application.connect('server-connected', self.signal_kp_server_connected)
def signal_kp_server_connected(self, _):
event_id = 'db-' + self.table_name.replace('_', '-')
server_events = self.application.server_events
server_events.subscribe(event_id, ('deleted', 'inserted', 'updated'), ('id', 'campaign_id'))
server_events.connect(event_id, self.signal_server_event_db)
def signal_server_event_db(self, _, event_type, rows):
get_node = lambda id: self.rpc.graphql(self.node_query, {'id': str(id)})['db']['node']
for row in rows:
if str(row.campaign_id) != self.config['campaign_id']:
continue
model = self.gobjects['treeview_campaign'].get_model()
for case in utilities.switch(event_type):
if case('inserted'):
row_data = self.format_node_data(get_node(row.id))
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(row.id))
gui_utilities.glib_idle_add_wait(model.append, row_data)
ti = gui_utilities.gtk_list_store_search(model, str(row.id))
if ti is None:
self.logger.warning("received server db event: {0} for non-existent row {1}:{2}".format(event_type, self.table_name, str(row.id)))
break
if case('deleted'):
model.remove(ti)
break
if case('updated'):
row_data = self.format_node_data(get_node(row.id))
for idx, cell_data in enumerate(row_data, 1):
model[ti][idx] = self.format_cell_data(cell_data)
break
def _prompt_to_delete_row(self, treeview, _):
if isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive():
gui_utilities.show_dialog_warning('Can Not Delete Rows While Loading', self.parent)
return
model = treeview.get_model()
row_ids = [model.get_value(ti, 0) for ti in gui_utilities.gtk_treeview_selection_iterate(treeview)]
if len(row_ids) == 0:
return
elif len(row_ids) == 1:
message = 'Delete This Row?'
else:
message = "Delete These {0:,} Rows?".format(len(row_ids))
if not gui_utilities.show_dialog_yes_no(message, self.parent, 'This information will be lost.'):
return
self.application.emit(self.table_name[:-1] + '-delete', row_ids)
def format_node_data(self, node):
"""
This method is overridden by subclasses to format the raw node
data returned from the server. The length of the list must equal
the number of columns in the table. This method is called for
each node in the remote table by the loader thread.
:param dict node: The node from a GraphQL query representing data for this table.
:return: The formatted row data.
:rtype: list
"""
raise NotImplementedError()
def format_cell_data(self, cell_data, encoding='utf-8'):
"""
This method provides formatting to the individual cell values returned
from the :py:meth:`.format_node_data` method. Values are converted into
a format suitable for reading.
:param cell_data: The value to format.
:param str encoding: The encoding to use to coerce the return value into a unicode string.
:return: The formatted cell value.
:rtype: str
"""
if isinstance(cell_data, datetime.datetime):
cell_data = utilities.datetime_utc_to_local(cell_data)
return utilities.format_datetime(cell_data, encoding=encoding)
if cell_data is None:
cell_data = ''
elif isinstance(cell_data, int):
cell_data = str(cell_data)
# ensure that the return value is a unicode string
if isinstance(cell_data, bytes):
cell_data = cell_data.decode(encoding)
return cell_data
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the
:py:attr:`~.CampaignViewGenericTab.last_load_time` is compared
with the :py:attr:`~.CampaignViewGenericTab.refresh_frequency` to
check if the information is stale. If the local data is not stale,
this function will return without updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
self.loader_thread_lock.acquire()
self._sync_loader_thread()
self.loader_thread_stop.clear()
store = self.gobjects['treeview_campaign'].get_model()
store.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine, args=(store,))
self.loader_thread.daemon = True
self.loader_thread.start()
self.loader_thread_lock.release()
return
def loader_thread_routine(self, store):
"""
The loading routine to be executed within a thread.
:param store: The store object to place the new data.
:type store: :py:class:`Gtk.ListStore`
"""
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', False))
campaign_id = self.config['campaign_id']
count = 500
page_info = {'endCursor': None, 'hasNextPage': True}
while page_info['hasNextPage']:
if self.rpc is None:
break
results = self.rpc.graphql(self.table_query, {'campaign': campaign_id, 'count': count, 'cursor': page_info['endCursor']})
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
for edge in results['db']['campaign'][self.table_name]['edges']:
row_data = self.format_node_data(edge['node'])
row_data = list(map(self.format_cell_data, row_data))
row_data.insert(0, str(edge['node']['id']))
gui_utilities.glib_idle_add_wait(store.append, row_data)
page_info = results['db']['campaign'][self.table_name]['pageInfo']
if self.is_destroyed.is_set():
return
gui_utilities.glib_idle_add_wait(lambda: self.gobjects['treeview_campaign'].set_property('sensitive', True))
self.last_load_time = time.time()
def signal_button_clicked_export(self, button):
self.export_table_to_csv()
def export_table_to_csv(self):
"""Export the data represented by the view to a CSV file."""
if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()):
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
dialog = extras.FileChooserDialog('Export Data', self.parent)
file_name = self.config['campaign_name'] + '.csv'
response = dialog.run_quick_save(file_name)
dialog.destroy()
if not response:
self.loader_thread_lock.release()
return
destination_file = response['target_path']
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_csv(store, destination_file, columns)
self.loader_thread_lock.release()
def export_table_to_xlsx_worksheet(self, worksheet, title_format):
"""
Export the data represented by the view to an XLSX worksheet.
:param worksheet: The destination sheet for the store's data.
:type worksheet: :py:class:`xlsxwriter.worksheet.Worksheet`
:param title_format: The formatting to use for the title row.
:type title_format: :py:class:`xlsxwriter.format.Format`
"""
if not self.loader_thread_lock.acquire(False) or (isinstance(self.loader_thread, threading.Thread) and self.loader_thread.is_alive()):
gui_utilities.show_dialog_warning('Can Not Export Rows While Loading', self.parent)
return
store = self.gobjects['treeview_campaign'].get_model()
columns = dict(enumerate(('UID',) + self.view_columns))
export.liststore_to_xlsx_worksheet(store, worksheet, columns, title_format, xlsx_options=self.xlsx_worksheet_options)
self.loader_thread_lock.release()
class CampaignViewDeaddropTab(CampaignViewGenericTableTab):
"""Display campaign information regarding dead drop connections."""
table_name = 'deaddrop_connections'
label_text = 'Deaddrop'
node_query = """\
query getDeaddropConnection($id: String!) {
db {
node: deaddropConnection(id: $id) {
id
deaddropDeployment { destination }
visitCount
visitorIp
localUsername
localHostname
localIpAddresses
firstVisit
lastVisit
}
}
}
"""
table_query = """\
query getDeaddropConnections($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
deaddropConnections(first: $count, after: $cursor) {
total
edges {
node {
id
deaddropDeployment { destination }
visitCount
visitorIp
localUsername
localHostname
localIpAddresses
firstVisit
lastVisit
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Destination',
'Visit Count',
'IP Address',
'Username',
'Hostname',
'Local IP Addresses',
'First Hit',
'Last Hit'
)
def format_node_data(self, connection):
deploy_details = self.rpc.remote_table_row('deaddrop_deployments', connection.deployment_id, cache=True)
if not deploy_details:
return None
row = (
deploy_details.destination,
connection.visit_count,
connection.visitor_ip,
connection.local_username,
connection.local_hostname,
connection.local_ip_addresses,
connection.first_visit,
connection.last_visit
)
return row
class CampaignViewCredentialsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding submitted credentials."""
table_name = 'credentials'
label_text = 'Credentials'
node_query = """\
query getCredential($id: String!) {
db {
node: credential(id: $id) {
id
message { targetEmail }
username
password
submitted
}
}
}
"""
table_query = """\
query getCredentials($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
credentials(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
username
password
submitted
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Username',
'Password',
'Submitted'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(20, 30, 30, 30, 25),
title=label_text
)
def __init__(self, *args, **kwargs):
super(CampaignViewCredentialsTab, self).__init__(*args, **kwargs)
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', False)
def format_node_data(self, node):
row = (
node['message']['targetEmail'],
node['username'],
node['password'],
node['submitted']
)
return row
def signal_button_toggled_show_passwords(self, button):
treeview = self.gobjects['treeview_campaign']
pwd_column_id = self.view_columns.index('Password')
treeview.get_column(pwd_column_id).set_property('visible', button.get_property('active'))
class CampaignViewDashboardTab(CampaignViewGenericTab):
"""Display campaign information on a graphical dash board."""
dependencies = gui_utilities.GladeDependencies(
children=(
'box_top_left',
'box_top_right',
'box_bottom',
'scrolledwindow_top_left',
'scrolledwindow_top_right',
'scrolledwindow_bottom'
)
)
label_text = 'Dashboard'
"""The tabs label for display in the GUI."""
def __init__(self, *args, **kwargs):
super(CampaignViewDashboardTab, self).__init__(*args, **kwargs)
self.graphs = []
"""The :py:class:`.CampaignGraph` classes represented on the dash board."""
dash_ports = {
# dashboard position, (width, height)
'top_left': (380, 200),
'top_right': (380, 200),
'bottom': (760, 200)
}
for dash_port, details in dash_ports.items():
graph_name = self.config['dashboard.' + dash_port]
cls = graphs.get_graph(graph_name)
if not cls:
self.logger.warning('could not get graph: ' + graph_name)
logo_file_path = find.data_file('king-phisher-icon.svg')
if logo_file_path:
image = Gtk.Image.new_from_pixbuf(GdkPixbuf.Pixbuf.new_from_file_at_size(logo_file_path, 128, 128))
image.show()
self.gobjects['scrolledwindow_' + dash_port].add(image)
continue
graph_inst = cls(self.application, details, getattr(self, self.top_gobject).get_style_context())
self.gobjects['scrolledwindow_' + dash_port].add(graph_inst.canvas)
self.gobjects['box_' + dash_port].pack_end(graph_inst.navigation_toolbar, False, False, 0)
self.graphs.append(graph_inst)
self.logger.debug("dashboard refresh frequency set to {0} seconds".format(self.refresh_frequency))
GLib.timeout_add_seconds(self.refresh_frequency, self.loader_idle_routine)
def load_campaign_information(self, force=True):
"""
Load the necessary campaign information from the remote server.
Unless *force* is True, the :py:attr:`~.last_load_time` is compared with
the :py:attr:`~.refresh_frequency` to check if the information is stale.
If the local data is not stale, this function will return without
updating the table.
:param bool force: Ignore the load life time and force loading the remote data.
"""
if not force and ((time.time() - self.last_load_time) < self.refresh_frequency):
return
if not self.application.rpc:
self.logger.warning('skipping load_campaign_information because rpc is not initialized')
return
with self.loader_thread_lock:
self._sync_loader_thread()
self.loader_thread_stop.clear()
self.loader_thread = threading.Thread(target=self.loader_thread_routine)
self.loader_thread.daemon = True
self.loader_thread.start()
def loader_idle_routine(self):
"""The routine which refreshes the campaign data at a regular interval."""
if self.rpc and not self.loader_thread_is_running:
self.logger.debug('idle loader routine called')
self.load_campaign_information()
return True
def loader_thread_routine(self):
"""The loading routine to be executed within a thread."""
if not 'campaign_id' in self.config:
return
if not self.rpc.remote_table_row('campaigns', self.config['campaign_id']):
return
info_cache = {}
for graph in self.graphs:
if self.loader_thread_stop.is_set():
break
if self.is_destroyed.is_set():
break
info_cache.update(gui_utilities.glib_idle_add_wait(lambda g=graph: g.refresh(info_cache, self.loader_thread_stop)))
else:
self.last_load_time = time.time()
class CampaignViewVisitsTab(CampaignViewGenericTableTab):
"""Display campaign information regarding incoming visitors."""
table_name = 'visits'
label_text = 'Visits'
node_query = """\
query getVisit($id: String!) {
db {
node: visit(id: $id) {
id
message { targetEmail }
visitorIp
visitCount
visitorDetails
visitorGeoloc { city }
firstVisit
lastVisit
}
}
}
"""
table_query = """\
query getVisits($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
visits(first: $count, after: $cursor) {
total
edges {
node {
id
message { targetEmail }
visitorIp
visitCount
visitorDetails
visitorGeoloc { city }
firstVisit
lastVisit
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'IP Address',
'Visit Count',
'Visitor User Agent',
'Visitor Location',
'First Visit',
'Last Visit'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 25, 15, 90, 30, 25, 25),
title=label_text
)
def format_node_data(self, node):
geo_location = UNKNOWN_LOCATION_STRING
visitor_ip = node['visitorIp']
if visitor_ip is None:
visitor_ip = ''
else:
visitor_ip = ipaddress.ip_address(visitor_ip)
if visitor_ip.is_loopback:
geo_location = 'N/A (Loopback)'
elif visitor_ip.is_private:
geo_location = 'N/A (Private)'
elif isinstance(visitor_ip, ipaddress.IPv6Address):
geo_location = 'N/A (IPv6 Address)'
elif node['visitorGeoloc']:
geo_location = node['visitorGeoloc']['city']
row = (
node['message']['targetEmail'],
str(visitor_ip),
node['visitCount'],
node['visitorDetails'],
geo_location,
node['firstVisit'],
node['lastVisit']
)
return row
class CampaignViewMessagesTab(CampaignViewGenericTableTab):
"""Display campaign information regarding sent messages."""
table_name = 'messages'
label_text = 'Messages'
node_query = """\
query getMessage($id: String!) {
db {
node: message(id: $id) {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
}
"""
table_query = """\
query getMessages($campaign: String!, $count: Int!, $cursor: String) {
db {
campaign(id: $campaign) {
messages(first: $count, after: $cursor) {
total
edges {
node {
id
targetEmail
sent
trained
companyDepartment { name }
opened
openerIp
openerUserAgent
}
}
pageInfo {
endCursor
hasNextPage
}
}
}
}
}
"""
view_columns = (
'Email Address',
'Sent',
'Trained',
'Department',
'Opened',
'Opener IP Address',
'Opener User Agent'
)
xlsx_worksheet_options = export.XLSXWorksheetOptions(
column_widths=(30, 30, 30, 15, 20, 20, 25, 90),
title=label_text
)
def format_node_data(self, node):
department = node['companyDepartment']
if department:
department = department['name']
row = (
node['targetEmail'],
node['sent'],
('Yes' if node['trained'] else ''),
department,
node['opened'],
node['openerIp'],
node['openerUserAgent']
)
return row
class CampaignViewTab(object):
"""
The King Phisher client top-level 'View Campaign' tab. This object
manages the sub-tabs which display all the information regarding
the current campaign.
"""
def __init__(self, parent, application):
"""
:param parent: The parent window for this object.
:type parent: :py:class:`Gtk.Window`
:param application: The main client application instance.
:type application: :py:class:`Gtk.Application`
"""
self.parent = parent
self.application = application
self.config = application.config
self.logger = logging.getLogger('KingPhisher.Client.' + self.__class__.__name__)
self.box = Gtk.Box()
self.box.set_property('orientation', Gtk.Orientation.VERTICAL)
self.box.show()
self.label = Gtk.Label(label='View Campaign')
"""The :py:class:`Gtk.Label` representing this tabs name."""
self.notebook = Gtk.Notebook()
""" The :py:class:`Gtk.Notebook` for holding sub-tabs."""
self.notebook.connect('switch-page', self.signal_notebook_switch_page)
self.notebook.set_scrollable(True)
self.box.pack_start(self.notebook, True, True, 0)
self.tabs = utilities.FreezableDict()
"""A dict object holding the sub tabs managed by this object."""
current_page = self.notebook.get_current_page()
self.last_page_id = current_page
if graphs.has_matplotlib:
self.logger.info('matplotlib is installed, dashboard will be available')
dashboard_tab = CampaignViewDashboardTab(application)
self.tabs['dashboard'] = dashboard_tab
self.notebook.append_page(dashboard_tab.box, dashboard_tab.label)
else:
self.logger.warning('matplotlib is not installed, dashboard will not be available')
messages_tab = CampaignViewMessagesTab(application)
self.tabs['messages'] = messages_tab
self.notebook.append_page(messages_tab.box, messages_tab.label)
visits_tab = CampaignViewVisitsTab(application)
self.tabs['visits'] = visits_tab
self.notebook.append_page(visits_tab.box, visits_tab.label)
credentials_tab = CampaignViewCredentialsTab(application)
self.tabs['credentials'] = credentials_tab
self.notebook.append_page(credentials_tab.box, credentials_tab.label)
if self.config.get('gui.show_deaddrop', False):
deaddrop_connections_tab = CampaignViewDeaddropTab(application)
self.tabs['deaddrop_connections'] = deaddrop_connections_tab
self.notebook.append_page(deaddrop_connections_tab.box, deaddrop_connections_tab.label)
self.tabs.freeze()
for tab in self.tabs.values():
tab.box.show()
self.notebook.show()
def signal_notebook_switch_page(self, notebook, current_page, index):
if not hasattr(self.parent, 'rpc'):
return
#previous_page = notebook.get_nth_page(self.last_page_id)
self.last_page_id = index
for tab in self.tabs.values():
if current_page != tab.box:
continue
if hasattr(tab, 'load_campaign_information'):
tab.load_campaign_information(force=False)
| bsd-3-clause |
hexastorm/opticaldesign | simulation.py | 1 | 3946 | %pylab inline
import warnings
import numpy as np
import matplotlib.pyplot as plt
import rayopt as ro
# Lens used 12.5mm Dia. x 90mm FL, VIS-NIR, Inked, Achromatic Lens from Edmund Optics
# LINK: http://www.edmundoptics.com/document/download/391099
filename='zmax_49332ink.zmx'
with open(filename) as file:
data=file.read()
# Parameters:
wavelength=405e-9 # wavelength [m]
D=0.8 # diameter bundle [mm] see, s.scale=0.001 [m]
T=35 # plate thickness [mm]
utilt=np.radians(0) # tilt angle plate [radians]
# Radius plate
# I can't remember why I use tangent, should not matter
# as long diameter is large enough
spol=T*np.tan(np.pi/8)
# Create the system
s=ro.zemax.zmx_to_system(data)
s.object.pupil.radius = D/2
# Ensures rays created with function ray_point are in the [-D/2,D/2] range
s.object.pupil.update_radius = False
s.object.angle = np.radians(0) # [radians]
s.wavelengths = [wavelength]
s.update()
# changes needed to make the Zemax data compatible with Rayopt
del s[0]
# set physical size of the offset surface, i.e. the left line in the drawing
s[0].radius = 20 # [mm]
# sets the length between the first virtual offset surface and the lens
s[1].distance = 0 # [mm]
# add parallel plate to the system
s.insert(4,ro.elements.Spheroid(distance=10,material='SCHOTT/N-BK7',
diameter=spol*2,angles=[utilt,0,0]))
s.insert(5,ro.elements.Spheroid(distance=T/np.cos(utilt),material='basic/air',
diameter=spol*2,angles=[utilt,0,0]))
# NOTE: due to the rotation the plate thickness increases to T/np.cos(utilt);
# if this is not done, the transversal focus shift/displacement
# does not agree with the theoretical model
s.update()
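# Added illustrative cross-check (not part of the original script): the
# textbook lateral displacement of a ray through a tilted plane-parallel
# plate, d = T*sin(u)*(1 - cos(u)/sqrt(n**2 - sin(u)**2)). The refractive
# index below is an assumed value for N-BK7 near 405 nm.
def plate_lateral_shift(thickness_mm, tilt_rad, n_glass=1.530):
    return thickness_mm * np.sin(tilt_rad) * (
        1 - np.cos(tilt_rad) / np.sqrt(n_glass**2 - np.sin(tilt_rad)**2))
print("Theoretical plate displacement (assumed n=1.530): "
      + str(plate_lateral_shift(T, utilt)) + " mm")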
#s.align(s.paraxial.n) # used by jordens for astigmatic focus shift, destroys rotation
# astigmatic focus shift , can also be obtained from print(q) and looking at table
#print("Astigmatic focus shift "+str(abs(q.waist_position.T[0][-1])-abs(q.waist_position.T[1][-1])))+" mm.")
# Geometric trace
g = ro.GeometricTrace(s)
# In my system, I am only interested in one field
# with a field angle equal to zero radians
# Several distributions can be chosen: hexapolar, random, radau
# The radau scheme should give a good result while using relatively few rays
fieldangle=0
g.rays_point((0, fieldangle), wavelength=wavelength, nrays=20,
distribution="radau", filter=False, clip=False)
# Geometric focus [used]
g.refocus()
q = ro.GaussianTrace(s)
if utilt==0:
fig, ax = plt.subplots()
s.plot(ax)
q.plot(ax, color="red", scale=1)
print("The spot radius is "+str(q.spot_radius[-1][0]*1000))
print("The Gaussian waist radius is "+str(round(q.spot_radius[-1][0]*1000,2))+" micrometers.")
print("The Rayleigh range is "+str(q.rayleigh_range[-1][0])+ " mm.")
# The geometric RMS spotsize is then calculated at the focal point
# i.e. RMS = <(W - <W>)^2>^(1/2)
# By default Rayopt evaluates this at the last surface, i.e. the focal point,
# as it sets i=surface equal to -1.
# all rays are given the same "weight"
print("RMS geometric spotsize is "+str(g.rms()*1000)+" micrometers.")
# The focus point distance is measured with respect to the lens
print("The focus point distance from the lens is "+str(g.path[-1]-g.path[3])+" mm.")
print("The transversal displacement is "+str(g.y[-1,-1,1])+" mm.")
p, qq, opd = g.opd(resample=False)
print("The lambda OPD RMS is "+str(np.sqrt((opd**2 * g.w).sum()/g.w.sum())))
#
p = ro.ParaxialTrace(s)
print("The Airy radius is "+ str(p.airy_radius[1]*1000)+" micrometers.")
# paraxial focus [not used]
#s.paraxial.refocus()
ro.Analysis(s,refocus_full=False, update=False)
# Gaussian trace
# plot only works when utilt is 0 degrees
# Seidel aberrations
#z = ro.PolyTrace(s)
#str(z)
# Retrieve seidel
#print("\n".join(z.print_seidel()))
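# Added sanity check (not part of the original script): compare rayopt's
# Rayleigh range with the textbook value z_R = pi*w0**2/lambda, assuming the
# Gaussian trace reports the waist radius in mm as the prints above suggest.
w0_m = q.spot_radius[-1][0] * 1e-3  # waist radius [m]
print("Textbook Rayleigh range: " + str(np.pi * w0_m**2 / wavelength * 1e3)
      + " mm (rayopt: " + str(q.rayleigh_range[-1][0]) + " mm)")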
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/sklearn/preprocessing/data.py | 1 | 67256 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Eric Martin <eric@ericmart.in>
# Giorgio Patrini <giorgio.patrini@anu.edu.au>
# License: BSD 3 clause
import numbers
import warnings
from itertools import chain, combinations
import numpy as np
from scipy import sparse
from ..utils.sparsefuncs_fast import (inplace_csr_row_normalize_l1,
inplace_csr_row_normalize_l2)
from ..base import BaseEstimator, TransformerMixin
from ..externals import six
from ..utils import check_array
from ..utils import deprecated
from ..utils.extmath import _incremental_mean_and_var
from ..utils.extmath import row_norms
from ..utils.fixes import combinations_with_replacement as combinations_w_r
from ..utils.sparsefuncs import (inplace_column_scale,
mean_variance_axis, incr_mean_variance_axis,
min_max_axis)
from ..utils.validation import check_is_fitted, FLOAT_DTYPES
zip = six.moves.zip
map = six.moves.map
range = six.moves.range
__all__ = [
'Binarizer',
'KernelCenterer',
'MinMaxScaler',
'MaxAbsScaler',
'Normalizer',
'OneHotEncoder',
'RobustScaler',
'StandardScaler',
'add_dummy_feature',
'binarize',
'normalize',
'scale',
'robust_scale',
'maxabs_scale',
'minmax_scale',
]
DEPRECATION_MSG_1D = (
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample."
)
def _handle_zeros_in_scale(scale, copy=True):
''' Makes sure that whenever scale is zero, we handle it correctly.
This happens in most scalers when we have constant features.'''
# if we are fitting on 1D arrays, scale might be a scalar
if np.isscalar(scale):
if scale == .0:
scale = 1.
return scale
elif isinstance(scale, np.ndarray):
if copy:
# New array to avoid side-effects
scale = scale.copy()
scale[scale == 0.0] = 1.0
return scale
def scale(X, axis=0, with_mean=True, with_std=True, copy=True):
"""Standardize a dataset along any axis
Center to the mean and component wise scale to unit variance.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : {array-like, sparse matrix}
The data to center and scale.
axis : int (0 by default)
axis used to compute the means and standard deviations along. If 0,
independently standardize each feature, otherwise (if 1) standardize
each sample.
with_mean : boolean, True by default
If True, center the data before scaling.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSC matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_mean=False` (in that case, only variance scaling will be
performed on the features of the CSC matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSC matrix.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
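    Examples
    --------
    A minimal illustrative sketch (exact float formatting of the printed
    array can differ between numpy versions):

    >>> import numpy as np
    >>> from sklearn.preprocessing import scale
    >>> scale(np.array([[1., 2.], [3., 6.], [5., 10.]]))  # doctest: +SKIP
    array([[-1.22474487, -1.22474487],
           [ 0.        ,  0.        ],
           [ 1.22474487,  1.22474487]])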
"""
X = check_array(X, accept_sparse='csc', copy=copy, ensure_2d=False,
warn_on_dtype=True, estimator='the scale function',
dtype=FLOAT_DTYPES)
if sparse.issparse(X):
if with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` instead"
" See docstring for motivation and alternatives.")
if axis != 0:
raise ValueError("Can only scale sparse matrix on axis=0, "
" got axis=%d" % axis)
if with_std:
_, var = mean_variance_axis(X, axis=0)
var = _handle_zeros_in_scale(var, copy=False)
inplace_column_scale(X, 1 / np.sqrt(var))
else:
X = np.asarray(X)
if with_mean:
mean_ = np.mean(X, axis)
if with_std:
scale_ = np.std(X, axis)
        # Xr is a view on the original array that enables easy use of
        # broadcasting on the axis in which we are interested
Xr = np.rollaxis(X, axis)
if with_mean:
Xr -= mean_
mean_1 = Xr.mean(axis=0)
# Verify that mean_1 is 'close to zero'. If X contains very
# large values, mean_1 can also be very large, due to a lack of
# precision of mean_. In this case, a pre-scaling of the
# concerned feature is efficient, for instance by its mean or
# maximum.
if not np.allclose(mean_1, 0):
warnings.warn("Numerical issues were encountered "
"when centering the data "
"and might not be solved. Dataset may "
"contain too large values. You may need "
"to prescale your features.")
Xr -= mean_1
if with_std:
scale_ = _handle_zeros_in_scale(scale_, copy=False)
Xr /= scale_
if with_mean:
mean_2 = Xr.mean(axis=0)
# If mean_2 is not 'close to zero', it comes from the fact that
# scale_ is very small so that mean_2 = mean_1/scale_ > 0, even
# if mean_1 was close to zero. The problem is thus essentially
# due to the lack of precision of mean_. A solution is then to
# subtract the mean again:
if not np.allclose(mean_2, 0):
warnings.warn("Numerical issues were encountered "
"when scaling the data "
"and might not be solved. The standard "
"deviation of the data is probably "
"very close to 0. ")
Xr -= mean_2
return X
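# Usage sketch (illustrative; `X_demo` is a hypothetical array): standardize a
# small dense array column-wise, giving each column roughly zero mean and unit
# standard deviation.
# >>> X_demo = np.array([[1., -1.], [2., 0.], [3., 1.]])
# >>> X_scaled = scale(X_demo)
# >>> X_scaled.mean(axis=0)    # approximately array([ 0.,  0.])
# >>> X_scaled.std(axis=0)     # approximately array([ 1.,  1.])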
class MinMaxScaler(BaseEstimator, TransformerMixin):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
    copy : boolean, optional, default True
        Set to False to perform inplace scaling and avoid a
        copy (if the input is already a numpy array).
Attributes
----------
min_ : ndarray, shape (n_features,)
Per feature adjustment for minimum.
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
data_min_ : ndarray, shape (n_features,)
Per feature minimum seen in the data
.. versionadded:: 0.17
*data_min_* instead of deprecated *data_min*.
data_max_ : ndarray, shape (n_features,)
Per feature maximum seen in the data
.. versionadded:: 0.17
*data_max_* instead of deprecated *data_max*.
data_range_ : ndarray, shape (n_features,)
Per feature range ``(data_max_ - data_min_)`` seen in the data
.. versionadded:: 0.17
*data_range_* instead of deprecated *data_range*.
"""
def __init__(self, feature_range=(0, 1), copy=True):
self.feature_range = feature_range
self.copy = copy
@property
@deprecated("Attribute data_range will be removed in "
"0.19. Use ``data_range_`` instead")
def data_range(self):
return self.data_range_
@property
@deprecated("Attribute data_min will be removed in "
"0.19. Use ``data_min_`` instead")
def data_min(self):
return self.data_min_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.min_
del self.n_samples_seen_
del self.data_min_
del self.data_max_
del self.data_range_
def fit(self, X, y=None):
"""Compute the minimum and maximum to be used for later scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of min and max on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y : Passthrough for ``Pipeline`` compatibility.
"""
feature_range = self.feature_range
if feature_range[0] >= feature_range[1]:
raise ValueError("Minimum of desired feature range must be smaller"
" than maximum. Got %s." % str(feature_range))
if sparse.issparse(X):
raise TypeError("MinMaxScaler does no support sparse input. "
"You may consider to use MaxAbsScaler instead.")
X = check_array(X, copy=self.copy, ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
data_min = np.min(X, axis=0)
data_max = np.max(X, axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next steps
else:
data_min = np.minimum(self.data_min_, data_min)
data_max = np.maximum(self.data_max_, data_max)
self.n_samples_seen_ += X.shape[0]
data_range = data_max - data_min
self.scale_ = ((feature_range[1] - feature_range[0]) /
_handle_zeros_in_scale(data_range))
self.min_ = feature_range[0] - data_min * self.scale_
self.data_min_ = data_min
self.data_max_ = data_max
self.data_range_ = data_range
return self
def transform(self, X):
"""Scaling features of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X *= self.scale_
X += self.min_
return X
def inverse_transform(self, X):
"""Undo the scaling of X according to feature_range.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input data that will be transformed. It cannot be sparse.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, copy=self.copy, ensure_2d=False, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
X -= self.min_
X /= self.scale_
return X
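# Usage sketch (illustrative; names are hypothetical): fit the scaler on
# training data, then apply the same per-feature affine mapping to new data.
# Unseen values outside the training range map outside [0, 1].
# >>> mm_demo = MinMaxScaler(feature_range=(0, 1))
# >>> X_train_demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
# >>> mm_demo.fit_transform(X_train_demo)        # columns mapped onto [0, 1]
# >>> mm_demo.transform(np.array([[4., 40.]]))   # -> array([[ 1.5,  1.5]])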
def minmax_scale(X, feature_range=(0, 1), axis=0, copy=True):
"""Transforms features by scaling each feature to a given range.
This estimator scales and translates each feature individually such
that it is in the given range on the training set, i.e. between
zero and one.
The transformation is given by::
X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))
X_scaled = X_std * (max - min) + min
where min, max = feature_range.
This transformation is often used as an alternative to zero mean,
unit variance scaling.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
.. versionadded:: 0.17
*minmax_scale* function interface to :class:`sklearn.preprocessing.MinMaxScaler`.
Parameters
----------
feature_range: tuple (min, max), default=(0, 1)
Desired range of transformed data.
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
    # For backwards compatibility, we handle here the case of 1D input.
    # From 0.17, 1D input is deprecated in scaler objects, but we still want
    # to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, copy=False, ensure_2d=False, warn_on_dtype=True,
dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MinMaxScaler(feature_range=feature_range, copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class StandardScaler(BaseEstimator, TransformerMixin):
"""Standardize features by removing the mean and scaling to unit variance
Centering and scaling happen independently on each feature by computing
the relevant statistics on the samples in the training set. Mean and
standard deviation are then stored to be used on later data using the
`transform` method.
Standardization of a dataset is a common requirement for many
machine learning estimators: they might behave badly if the
    individual features do not more or less look like standard normally
distributed data (e.g. Gaussian with 0 mean and unit variance).
For instance many elements used in the objective function of
a learning algorithm (such as the RBF kernel of Support Vector
Machines or the L1 and L2 regularizers of linear models) assume that
all features are centered around 0 and have variance in the same
order. If a feature has a variance that is orders of magnitude larger
    than others, it might dominate the objective function and make the
estimator unable to learn from other features correctly as expected.
This scaler can also be applied to sparse CSR or CSC matrices by passing
`with_mean=False` to avoid breaking the sparsity structure of the data.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_mean : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_std : boolean, True by default
If True, scale the data to unit variance (or equivalently,
unit standard deviation).
copy : boolean, optional, default True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* is recommended instead of deprecated *std_*.
mean_ : array of floats with shape [n_features]
The mean value for each feature in the training set.
var_ : array of floats with shape [n_features]
The variance for each feature in the training set. Used to compute
`scale_`
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
See also
--------
:func:`sklearn.preprocessing.scale` to perform centering and
scaling without using the ``Transformer`` object oriented API
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
"""
def __init__(self, copy=True, with_mean=True, with_std=True):
self.with_mean = with_mean
self.with_std = with_std
self.copy = copy
@property
@deprecated("Attribute ``std_`` will be removed in 0.19. Use ``scale_`` instead")
def std_(self):
return self.scale_
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.mean_
del self.var_
def fit(self, X, y=None):
"""Compute the mean and std to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of mean and std on X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
The algorithm for incremental mean and std is given in Equation 1.5a,b
in Chan, Tony F., Gene H. Golub, and Randall J. LeVeque. "Algorithms
for computing the sample variance: Analysis and recommendations."
The American Statistician 37.3 (1983): 242-247:
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
        # Even in the case of `with_mean=False`, we update the mean anyway.
        # This is needed for the incremental computation of the variance.
        # See incr_mean_variance_axis and _incremental_mean_and_var.
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.with_std:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_, self.var_ = mean_variance_axis(X, axis=0)
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
self.mean_, self.var_, self.n_samples_seen_ = \
incr_mean_variance_axis(X, axis=0,
last_mean=self.mean_,
last_var=self.var_,
last_n=self.n_samples_seen_)
else:
self.mean_ = None
self.var_ = None
else:
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.mean_ = .0
self.n_samples_seen_ = 0
if self.with_std:
self.var_ = .0
else:
self.var_ = None
self.mean_, self.var_, self.n_samples_seen_ = \
_incremental_mean_and_var(X, self.mean_, self.var_,
self.n_samples_seen_)
if self.with_std:
self.scale_ = _handle_zeros_in_scale(np.sqrt(self.var_))
else:
self.scale_ = None
return self
def transform(self, X, y=None, copy=None):
"""Perform standardization by centering and scaling
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr', copy=copy,
ensure_2d=False, warn_on_dtype=True,
estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot center sparse matrices: pass `with_mean=False` "
"instead. See docstring for motivation and alternatives.")
if self.scale_ is not None:
inplace_column_scale(X, 1 / self.scale_)
else:
if self.with_mean:
X -= self.mean_
if self.with_std:
X /= self.scale_
return X
def inverse_transform(self, X, copy=None):
"""Scale back the data to the original representation
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to scale along the features axis.
"""
check_is_fitted(self, 'scale_')
copy = copy if copy is not None else self.copy
if sparse.issparse(X):
if self.with_mean:
raise ValueError(
"Cannot uncenter sparse matrices: pass `with_mean=False` "
"instead See docstring for motivation and alternatives.")
if not sparse.isspmatrix_csr(X):
X = X.tocsr()
copy = False
if copy:
X = X.copy()
if self.scale_ is not None:
inplace_column_scale(X, self.scale_)
else:
X = np.asarray(X)
if copy:
X = X.copy()
if self.with_std:
X *= self.scale_
if self.with_mean:
X += self.mean_
return X
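# Usage sketch (illustrative; names are hypothetical): partial_fit accumulates
# the statistics over batches, and the result matches a single fit on the
# concatenated data.
# >>> X_a = np.array([[0., 0.], [1., 1.]])
# >>> X_b = np.array([[2., 2.], [3., 3.]])
# >>> ss_demo = StandardScaler().partial_fit(X_a).partial_fit(X_b)
# >>> ss_demo.mean_                                        # array([ 1.5,  1.5])
# >>> StandardScaler().fit(np.vstack((X_a, X_b))).mean_    # same statistics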
class MaxAbsScaler(BaseEstimator, TransformerMixin):
"""Scale each feature by its maximum absolute value.
    This estimator scales each feature individually such
    that the maximal absolute value of each feature in the
    training set will be 1.0. It does not shift/center the data, and
    thus does not destroy any sparsity.
This scaler can also be applied to sparse CSR or CSC matrices.
.. versionadded:: 0.17
Parameters
----------
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
Attributes
----------
scale_ : ndarray, shape (n_features,)
Per feature relative scaling of the data.
.. versionadded:: 0.17
*scale_* attribute.
max_abs_ : ndarray, shape (n_features,)
Per feature maximum absolute value.
n_samples_seen_ : int
The number of samples processed by the estimator. Will be reset on
new calls to fit, but increments across ``partial_fit`` calls.
"""
def __init__(self, copy=True):
self.copy = copy
def _reset(self):
"""Reset internal data-dependent state of the scaler, if necessary.
__init__ parameters are not touched.
"""
        # Checking one attribute is enough, because they are all set together
# in partial_fit
if hasattr(self, 'scale_'):
del self.scale_
del self.n_samples_seen_
del self.max_abs_
def fit(self, X, y=None):
"""Compute the maximum absolute value to be used for later scaling.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the per-feature minimum and maximum
used for later scaling along the features axis.
"""
# Reset internal state before fitting
self._reset()
return self.partial_fit(X, y)
def partial_fit(self, X, y=None):
"""Online computation of max absolute value of X for later scaling.
All of X is processed as a single batch. This is intended for cases
        when `fit` is not feasible due to a very large number of `n_samples`
or because X is read from a continuous stream.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data used to compute the mean and standard deviation
used for later scaling along the features axis.
y: Passthrough for ``Pipeline`` compatibility.
"""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
mins, maxs = min_max_axis(X, axis=0)
max_abs = np.maximum(np.abs(mins), np.abs(maxs))
else:
max_abs = np.abs(X).max(axis=0)
# First pass
if not hasattr(self, 'n_samples_seen_'):
self.n_samples_seen_ = X.shape[0]
# Next passes
else:
max_abs = np.maximum(self.max_abs_, max_abs)
self.n_samples_seen_ += X.shape[0]
self.max_abs_ = max_abs
self.scale_ = _handle_zeros_in_scale(max_abs)
return self
def transform(self, X, y=None):
"""Scale the data
Parameters
----------
X : {array-like, sparse matrix}
The data that should be scaled.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, 1.0 / self.scale_)
else:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : {array-like, sparse matrix}
The data that should be transformed back.
"""
check_is_fitted(self, 'scale_')
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
inplace_column_scale(X, self.scale_)
else:
X *= self.scale_
return X
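# Usage sketch (illustrative): scaling by the per-column maximum absolute value
# leaves zeros untouched, so sparse input stays sparse. Uses the `sparse`
# module imported at the top of this file.
# >>> X_sp = sparse.csr_matrix([[1., -2.], [0., 4.]])
# >>> MaxAbsScaler().fit_transform(X_sp).toarray()
# array([[ 1. , -0.5],
#        [ 0. ,  1. ]])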
def maxabs_scale(X, axis=0, copy=True):
"""Scale each feature to the [-1, 1] range without breaking the sparsity.
This estimator scales each feature individually such
that the maximal absolute value of each feature in the
training set will be 1.0.
This scaler can also be applied to sparse CSR or CSC matrices.
Parameters
----------
axis : int (0 by default)
axis used to scale along. If 0, independently scale each feature,
otherwise (if 1) scale each sample.
copy : boolean, optional, default is True
Set to False to perform inplace scaling and avoid a copy (if the input
is already a numpy array).
"""
    # For backwards compatibility, we handle here the case of 1D input.
    # From 0.17, 1D input is deprecated in scaler objects, but we still want
    # to allow users to keep calling this function with 1D input.
# Cast input to array, as we need to check ndim. Prior to 0.17, that was
# done inside the scaler object fit_transform.
# If copy is required, it will be done inside the scaler object.
X = check_array(X, accept_sparse=('csr', 'csc'), copy=False,
ensure_2d=False, dtype=FLOAT_DTYPES)
original_ndim = X.ndim
if original_ndim == 1:
X = X.reshape(X.shape[0], 1)
s = MaxAbsScaler(copy=copy)
if axis == 0:
X = s.fit_transform(X)
else:
X = s.fit_transform(X.T).T
if original_ndim == 1:
X = X.ravel()
return X
class RobustScaler(BaseEstimator, TransformerMixin):
"""Scale features using statistics that are robust to outliers.
This Scaler removes the median and scales the data according to
the Interquartile Range (IQR). The IQR is the range between the 1st
quartile (25th quantile) and the 3rd quartile (75th quantile).
Centering and scaling happen independently on each feature (or each
sample, depending on the `axis` argument) by computing the relevant
statistics on the samples in the training set. Median and interquartile
range are then stored to be used on later data using the `transform`
method.
Standardization of a dataset is a common requirement for many
machine learning estimators. Typically this is done by removing the mean
and scaling to unit variance. However, outliers can often influence the
sample mean / variance in a negative way. In such cases, the median and
the interquartile range often give better results.
.. versionadded:: 0.17
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
with_centering : boolean, True by default
If True, center the data before scaling.
This does not work (and will raise an exception) when attempted on
sparse matrices, because centering them entails building a dense
matrix which in common use cases is likely to be too large to fit in
memory.
with_scaling : boolean, True by default
If True, scale the data to interquartile range.
copy : boolean, optional, default is True
If False, try to avoid a copy and do inplace scaling instead.
This is not guaranteed to always work inplace; e.g. if the data is
not a NumPy array or scipy.sparse CSR matrix, a copy may still be
returned.
Attributes
----------
center_ : array of floats
The median value for each feature in the training set.
scale_ : array of floats
The (scaled) interquartile range for each feature in the training set.
.. versionadded:: 0.17
*scale_* attribute.
See also
--------
:class:`sklearn.preprocessing.StandardScaler` to perform centering
and scaling using mean and variance.
:class:`sklearn.decomposition.RandomizedPCA` with `whiten=True`
to further remove the linear correlation across features.
Notes
-----
See examples/preprocessing/plot_robust_scaling.py for an example.
http://en.wikipedia.org/wiki/Median_(statistics)
http://en.wikipedia.org/wiki/Interquartile_range
"""
def __init__(self, with_centering=True, with_scaling=True, copy=True):
self.with_centering = with_centering
self.with_scaling = with_scaling
self.copy = copy
def _check_array(self, X, copy):
"""Makes sure centering is not enabled for sparse matrices."""
X = check_array(X, accept_sparse=('csr', 'csc'), copy=self.copy,
ensure_2d=False, estimator=self, dtype=FLOAT_DTYPES)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_centering:
raise ValueError(
"Cannot center sparse matrices: use `with_centering=False`"
" instead. See docstring for motivation and alternatives.")
return X
def fit(self, X, y=None):
"""Compute the median and quantiles to be used for scaling.
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data used to compute the median and quantiles
used for later scaling along the features axis.
"""
if sparse.issparse(X):
raise TypeError("RobustScaler cannot be fitted on sparse inputs")
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if self.with_centering:
self.center_ = np.median(X, axis=0)
if self.with_scaling:
q = np.percentile(X, (25, 75), axis=0)
self.scale_ = (q[1] - q[0])
self.scale_ = _handle_zeros_in_scale(self.scale_, copy=False)
return self
def transform(self, X, y=None):
"""Center and scale the data
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, 1.0 / self.scale_)
else:
if self.with_centering:
X -= self.center_
if self.with_scaling:
X /= self.scale_
return X
def inverse_transform(self, X):
"""Scale back the data to the original representation
Parameters
----------
X : array-like
The data used to scale along the specified axis.
"""
if self.with_centering:
check_is_fitted(self, 'center_')
if self.with_scaling:
check_is_fitted(self, 'scale_')
X = self._check_array(X, self.copy)
if X.ndim == 1:
warnings.warn(DEPRECATION_MSG_1D, DeprecationWarning)
if sparse.issparse(X):
if self.with_scaling:
inplace_column_scale(X, self.scale_)
else:
if self.with_scaling:
X *= self.scale_
if self.with_centering:
X += self.center_
return X
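# Usage sketch (illustrative; names are hypothetical): the median/IQR
# statistics are barely affected by a single extreme outlier, unlike the
# mean/variance used by StandardScaler.
# >>> X_out = np.array([[1.], [2.], [3.], [4.], [100.]])
# >>> rs_demo = RobustScaler().fit(X_out)
# >>> rs_demo.center_    # array([ 3.])  -- the median, not the mean
# >>> rs_demo.scale_     # array([ 2.])  -- the interquartile range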
def robust_scale(X, axis=0, with_centering=True, with_scaling=True, copy=True):
"""Standardize a dataset along any axis
Center to the median and component wise scale
according to the interquartile range.
Read more in the :ref:`User Guide <preprocessing_scaler>`.
Parameters
----------
X : array-like
The data to center and scale.
axis : int (0 by default)
axis used to compute the medians and IQR along. If 0,
independently scale each feature, otherwise (if 1) scale
each sample.
with_centering : boolean, True by default
If True, center the data before scaling.
with_scaling : boolean, True by default
        If True, scale the data to the interquartile range.
    copy : boolean, optional, default is True
        set to False to perform inplace scaling and avoid a
        copy (if the input is already a numpy array or a scipy.sparse
        CSR matrix and if axis is 1).
Notes
-----
This implementation will refuse to center scipy.sparse matrices
since it would make them non-sparse and would potentially crash the
program with memory exhaustion problems.
Instead the caller is expected to either set explicitly
`with_centering=False` (in that case, only variance scaling will be
performed on the features of the CSR matrix) or to call `X.toarray()`
if he/she expects the materialized dense array to fit in memory.
To avoid memory copy the caller should pass a CSR matrix.
See also
--------
:class:`sklearn.preprocessing.RobustScaler` to perform centering and
scaling using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
s = RobustScaler(with_centering=with_centering, with_scaling=with_scaling,
copy=copy)
if axis == 0:
return s.fit_transform(X)
else:
return s.fit_transform(X.T).T
class PolynomialFeatures(BaseEstimator, TransformerMixin):
"""Generate polynomial and interaction features.
Generate a new feature matrix consisting of all polynomial combinations
of the features with degree less than or equal to the specified degree.
For example, if an input sample is two dimensional and of the form
[a, b], the degree-2 polynomial features are [1, a, b, a^2, ab, b^2].
Parameters
----------
degree : integer
The degree of the polynomial features. Default = 2.
interaction_only : boolean, default = False
If true, only interaction features are produced: features that are
products of at most ``degree`` *distinct* input features (so not
``x[1] ** 2``, ``x[0] * x[2] ** 3``, etc.).
include_bias : boolean
If True (default), then include a bias column, the feature in which
all polynomial powers are zero (i.e. a column of ones - acts as an
intercept term in a linear model).
Examples
--------
>>> X = np.arange(6).reshape(3, 2)
>>> X
array([[0, 1],
[2, 3],
[4, 5]])
>>> poly = PolynomialFeatures(2)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0., 0., 1.],
[ 1., 2., 3., 4., 6., 9.],
[ 1., 4., 5., 16., 20., 25.]])
>>> poly = PolynomialFeatures(interaction_only=True)
>>> poly.fit_transform(X)
array([[ 1., 0., 1., 0.],
[ 1., 2., 3., 6.],
[ 1., 4., 5., 20.]])
Attributes
----------
powers_ : array, shape (n_input_features, n_output_features)
powers_[i, j] is the exponent of the jth input in the ith output.
n_input_features_ : int
The total number of input features.
n_output_features_ : int
The total number of polynomial output features. The number of output
features is computed by iterating over all suitably sized combinations
of input features.
Notes
-----
Be aware that the number of features in the output array scales
polynomially in the number of features of the input array, and
exponentially in the degree. High degrees can cause overfitting.
See :ref:`examples/linear_model/plot_polynomial_interpolation.py
<example_linear_model_plot_polynomial_interpolation.py>`
"""
def __init__(self, degree=2, interaction_only=False, include_bias=True):
self.degree = degree
self.interaction_only = interaction_only
self.include_bias = include_bias
@staticmethod
def _combinations(n_features, degree, interaction_only, include_bias):
comb = (combinations if interaction_only else combinations_w_r)
start = int(not include_bias)
return chain.from_iterable(comb(range(n_features), i)
for i in range(start, degree + 1))
@property
def powers_(self):
check_is_fitted(self, 'n_input_features_')
combinations = self._combinations(self.n_input_features_, self.degree,
self.interaction_only,
self.include_bias)
        return np.vstack([np.bincount(c, minlength=self.n_input_features_)
                          for c in combinations])
def fit(self, X, y=None):
"""
Compute number of output features.
"""
n_samples, n_features = check_array(X).shape
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
self.n_input_features_ = n_features
self.n_output_features_ = sum(1 for _ in combinations)
return self
def transform(self, X, y=None):
"""Transform data to polynomial features
Parameters
----------
X : array-like, shape [n_samples, n_features]
The data to transform, row by row.
Returns
-------
XP : np.ndarray shape [n_samples, NP]
The matrix of features, where NP is the number of polynomial
features generated from the combination of inputs.
"""
check_is_fitted(self, ['n_input_features_', 'n_output_features_'])
X = check_array(X, dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
if n_features != self.n_input_features_:
raise ValueError("X shape does not match training shape")
# allocate output data
XP = np.empty((n_samples, self.n_output_features_), dtype=X.dtype)
combinations = self._combinations(n_features, self.degree,
self.interaction_only,
self.include_bias)
for i, c in enumerate(combinations):
XP[:, i] = X[:, c].prod(1)
return XP
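# Illustrative note (not from the original source): powers_ records, for each
# output column, the exponent applied to each input feature; for two inputs
# and degree=2 the rows correspond to [1, a, b, a^2, ab, b^2].
# >>> pf_demo = PolynomialFeatures(degree=2).fit(np.arange(6).reshape(3, 2))
# >>> pf_demo.powers_
# array([[0, 0],
#        [1, 0],
#        [0, 1],
#        [2, 0],
#        [1, 1],
#        [0, 2]])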
def normalize(X, norm='l2', axis=1, copy=True, return_norm=False):
"""Scale input vectors individually to unit norm (vector length).
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample (or each non-zero
feature if axis is 0).
axis : 0 or 1, optional (1 by default)
axis used to normalize the data along. If 1, independently normalize
each sample, otherwise (if 0) normalize each feature.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix and if axis is 1).
return_norm : boolean, default False
whether to return the computed norms
See also
--------
:class:`sklearn.preprocessing.Normalizer` to perform normalization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
if norm not in ('l1', 'l2', 'max'):
raise ValueError("'%s' is not a supported norm" % norm)
if axis == 0:
sparse_format = 'csc'
elif axis == 1:
sparse_format = 'csr'
else:
raise ValueError("'%d' is not a supported axis" % axis)
X = check_array(X, sparse_format, copy=copy, warn_on_dtype=True,
estimator='the normalize function', dtype=FLOAT_DTYPES)
if axis == 0:
X = X.T
if sparse.issparse(X):
if norm == 'l1':
inplace_csr_row_normalize_l1(X)
elif norm == 'l2':
inplace_csr_row_normalize_l2(X)
elif norm == 'max':
_, norms = min_max_axis(X, 1)
norms = norms.repeat(np.diff(X.indptr))
mask = norms != 0
X.data[mask] /= norms[mask]
else:
if norm == 'l1':
norms = np.abs(X).sum(axis=1)
elif norm == 'l2':
norms = row_norms(X)
elif norm == 'max':
norms = np.max(X, axis=1)
norms = _handle_zeros_in_scale(norms, copy=False)
X /= norms[:, np.newaxis]
if axis == 0:
X = X.T
if return_norm:
return X, norms
else:
return X
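# Usage sketch (illustrative; names are hypothetical): row-wise l2
# normalization; with return_norm=True the original row norms are returned.
# >>> X_n, norms_demo = normalize(np.array([[3., 4.], [1., 0.]]), return_norm=True)
# >>> X_n            # array([[ 0.6,  0.8], [ 1. ,  0. ]])
# >>> norms_demo     # array([ 5.,  1.])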
class Normalizer(BaseEstimator, TransformerMixin):
"""Normalize samples individually to unit norm.
Each sample (i.e. each row of the data matrix) with at least one
non zero component is rescaled independently of other samples so
that its norm (l1 or l2) equals one.
This transformer is able to work both with dense numpy arrays and
scipy.sparse matrix (use CSR format if you want to avoid the burden of
a copy / conversion).
Scaling inputs to unit norms is a common operation for text
classification or clustering for instance. For instance the dot
product of two l2-normalized TF-IDF vectors is the cosine similarity
of the vectors and is the base similarity metric for the Vector
Space Model commonly used by the Information Retrieval community.
Read more in the :ref:`User Guide <preprocessing_normalization>`.
Parameters
----------
norm : 'l1', 'l2', or 'max', optional ('l2' by default)
The norm to use to normalize each non zero sample.
copy : boolean, optional, default True
set to False to perform inplace row normalization and avoid a
copy (if the input is already a numpy array or a scipy.sparse
CSR matrix).
Notes
-----
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
See also
--------
:func:`sklearn.preprocessing.normalize` equivalent function
without the object oriented API
"""
def __init__(self, norm='l2', copy=True):
self.norm = norm
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
X = check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Scale each non zero row of X to unit norm
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to normalize, row by row. scipy.sparse matrices should be
in CSR format to avoid an un-necessary copy.
"""
copy = copy if copy is not None else self.copy
X = check_array(X, accept_sparse='csr')
return normalize(X, norm=self.norm, axis=1, copy=copy)
def binarize(X, threshold=0.0, copy=True):
"""Boolean thresholding of array-like or scipy.sparse matrix
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR or CSC format to avoid an
un-necessary copy.
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
        set to False to perform inplace binarization and avoid a copy
        (if the input is already a numpy array or a scipy.sparse CSR / CSC
        matrix).
See also
--------
:class:`sklearn.preprocessing.Binarizer` to perform binarization
using the ``Transformer`` API (e.g. as part of a preprocessing
:class:`sklearn.pipeline.Pipeline`)
"""
X = check_array(X, accept_sparse=['csr', 'csc'], copy=copy)
if sparse.issparse(X):
if threshold < 0:
raise ValueError('Cannot binarize a sparse matrix with threshold '
'< 0')
cond = X.data > threshold
not_cond = np.logical_not(cond)
X.data[cond] = 1
X.data[not_cond] = 0
X.eliminate_zeros()
else:
cond = X > threshold
not_cond = np.logical_not(cond)
X[cond] = 1
X[not_cond] = 0
return X
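# Usage sketch (illustrative): values strictly above the threshold map to 1,
# the rest to 0.
# >>> binarize(np.array([[0.2, 1.5], [-1., 3.]]), threshold=1.0)
# array([[ 0.,  1.],
#        [ 0.,  1.]])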
class Binarizer(BaseEstimator, TransformerMixin):
"""Binarize data (set feature values to 0 or 1) according to a threshold
Values greater than the threshold map to 1, while values less than
or equal to the threshold map to 0. With the default threshold of 0,
only positive values map to 1.
Binarization is a common operation on text count data where the
analyst can decide to only consider the presence or absence of a
feature rather than a quantified number of occurrences for instance.
It can also be used as a pre-processing step for estimators that
consider boolean random variables (e.g. modelled using the Bernoulli
distribution in a Bayesian setting).
Read more in the :ref:`User Guide <preprocessing_binarization>`.
Parameters
----------
threshold : float, optional (0.0 by default)
Feature values below or equal to this are replaced by 0, above it by 1.
Threshold may not be less than 0 for operations on sparse matrices.
copy : boolean, optional, default True
set to False to perform inplace binarization and avoid a copy (if
the input is already a numpy array or a scipy.sparse CSR matrix).
Notes
-----
If the input is a sparse matrix, only the non-zero values are subject
to update by the Binarizer class.
This estimator is stateless (besides constructor parameters), the
fit method does nothing but is useful when used in a pipeline.
"""
def __init__(self, threshold=0.0, copy=True):
self.threshold = threshold
self.copy = copy
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
check_array(X, accept_sparse='csr')
return self
def transform(self, X, y=None, copy=None):
"""Binarize each element of X
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
The data to binarize, element by element.
scipy.sparse matrices should be in CSR format to avoid an
un-necessary copy.
"""
copy = copy if copy is not None else self.copy
return binarize(X, threshold=self.threshold, copy=copy)
class KernelCenterer(BaseEstimator, TransformerMixin):
"""Center a kernel matrix
Let K(x, z) be a kernel defined by phi(x)^T phi(z), where phi is a
function mapping x to a Hilbert space. KernelCenterer centers (i.e.,
    normalizes to have zero mean) the data without explicitly computing phi(x).
It is equivalent to centering phi(x) with
sklearn.preprocessing.StandardScaler(with_std=False).
Read more in the :ref:`User Guide <kernel_centering>`.
"""
def fit(self, K, y=None):
"""Fit KernelCenterer
Parameters
----------
K : numpy array of shape [n_samples, n_samples]
Kernel matrix.
Returns
-------
self : returns an instance of self.
"""
K = check_array(K, dtype=FLOAT_DTYPES)
n_samples = K.shape[0]
self.K_fit_rows_ = np.sum(K, axis=0) / n_samples
self.K_fit_all_ = self.K_fit_rows_.sum() / n_samples
return self
def transform(self, K, y=None, copy=True):
"""Center kernel matrix.
Parameters
----------
K : numpy array of shape [n_samples1, n_samples2]
Kernel matrix.
copy : boolean, optional, default True
Set to False to perform inplace computation.
Returns
-------
K_new : numpy array of shape [n_samples1, n_samples2]
"""
check_is_fitted(self, 'K_fit_all_')
K = check_array(K, copy=copy, dtype=FLOAT_DTYPES)
K_pred_cols = (np.sum(K, axis=1) /
self.K_fit_rows_.shape[0])[:, np.newaxis]
K -= self.K_fit_rows_
K -= K_pred_cols
K += self.K_fit_all_
return K
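# Illustrative sketch (not from the original source): centering a linear
# kernel K = X X^T is equivalent to computing the kernel of the column-
# centered data, which is what KernelCenterer achieves without forming phi(x).
# >>> X_k = np.array([[1., 2.], [3., 4.], [5., 0.]])
# >>> K_demo = X_k.dot(X_k.T)
# >>> K_centered = KernelCenterer().fit(K_demo).transform(K_demo)
# >>> X_c = X_k - X_k.mean(axis=0)
# >>> np.allclose(K_centered, X_c.dot(X_c.T))    # True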
def add_dummy_feature(X, value=1.0):
"""Augment dataset with an additional dummy feature.
This is useful for fitting an intercept term with implementations which
cannot otherwise fit it directly.
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Data.
value : float
Value to use for the dummy feature.
Returns
-------
X : {array, sparse matrix}, shape [n_samples, n_features + 1]
Same data with dummy feature added as first column.
Examples
--------
>>> from sklearn.preprocessing import add_dummy_feature
>>> add_dummy_feature([[0, 1], [1, 0]])
array([[ 1., 0., 1.],
[ 1., 1., 0.]])
"""
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'], dtype=FLOAT_DTYPES)
n_samples, n_features = X.shape
shape = (n_samples, n_features + 1)
if sparse.issparse(X):
if sparse.isspmatrix_coo(X):
# Shift columns to the right.
col = X.col + 1
# Column indices of dummy feature are 0 everywhere.
col = np.concatenate((np.zeros(n_samples), col))
# Row indices of dummy feature are 0, ..., n_samples-1.
row = np.concatenate((np.arange(n_samples), X.row))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.coo_matrix((data, (row, col)), shape)
elif sparse.isspmatrix_csc(X):
# Shift index pointers since we need to add n_samples elements.
indptr = X.indptr + n_samples
# indptr[0] must be 0.
indptr = np.concatenate((np.array([0]), indptr))
# Row indices of dummy feature are 0, ..., n_samples-1.
indices = np.concatenate((np.arange(n_samples), X.indices))
# Prepend the dummy feature n_samples times.
data = np.concatenate((np.ones(n_samples) * value, X.data))
return sparse.csc_matrix((data, indices, indptr), shape)
else:
klass = X.__class__
return klass(add_dummy_feature(X.tocoo(), value))
else:
return np.hstack((np.ones((n_samples, 1)) * value, X))
def _transform_selected(X, transform, selected="all", copy=True):
"""Apply a transform function to portion of selected features
Parameters
----------
X : {array-like, sparse matrix}, shape [n_samples, n_features]
Dense array or sparse matrix.
transform : callable
A callable transform(X) -> X_transformed
copy : boolean, optional
Copy X even if it could be avoided.
selected: "all" or array of indices or mask
Specify which features to apply the transform to.
Returns
-------
X : array or sparse matrix, shape=(n_samples, n_features_new)
"""
if isinstance(selected, six.string_types) and selected == "all":
return transform(X)
X = check_array(X, accept_sparse='csc', copy=copy, dtype=FLOAT_DTYPES)
if len(selected) == 0:
return X
n_features = X.shape[1]
ind = np.arange(n_features)
sel = np.zeros(n_features, dtype=bool)
sel[np.asarray(selected)] = True
not_sel = np.logical_not(sel)
n_selected = np.sum(sel)
if n_selected == 0:
# No features selected.
return X
elif n_selected == n_features:
# All features selected.
return transform(X)
else:
X_sel = transform(X[:, ind[sel]])
X_not_sel = X[:, ind[not_sel]]
if sparse.issparse(X_sel) or sparse.issparse(X_not_sel):
return sparse.hstack((X_sel, X_not_sel))
else:
return np.hstack((X_sel, X_not_sel))
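# Illustrative sketch (names are hypothetical): the transform is applied only
# to the selected columns and the untouched columns are stacked to the right.
# >>> X_sel_demo = np.array([[1., 10.], [2., 20.], [3., 30.]])
# >>> _transform_selected(X_sel_demo, lambda Z: Z * 0., selected=[0])
# array([[  0.,  10.],
#        [  0.,  20.],
#        [  0.,  30.]])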
class OneHotEncoder(BaseEstimator, TransformerMixin):
"""Encode categorical integer features using a one-hot aka one-of-K scheme.
The input to this transformer should be a matrix of integers, denoting
the values taken on by categorical (discrete) features. The output will be
a sparse matrix where each column corresponds to one possible value of one
feature. It is assumed that input features take on values in the range
[0, n_values).
This encoding is needed for feeding categorical data to many scikit-learn
estimators, notably linear models and SVMs with the standard kernels.
Read more in the :ref:`User Guide <preprocessing_categorical_features>`.
Parameters
----------
n_values : 'auto', int or array of ints
Number of values per feature.
- 'auto' : determine value range from training data.
- int : number of categorical values per feature.
Each feature value should be in ``range(n_values)``
- array : ``n_values[i]`` is the number of categorical values in
``X[:, i]``. Each feature value should be in ``range(n_values[i])``
categorical_features: "all" or array of indices or mask
Specify what features are treated as categorical.
- 'all' (default): All features are treated as categorical.
- array of indices: Array of categorical feature indices.
- mask: Array of length n_features and with dtype=bool.
Non-categorical features are always stacked to the right of the matrix.
dtype : number type, default=np.float
Desired dtype of output.
sparse : boolean, default=True
Will return sparse matrix if set True else will return an array.
handle_unknown : str, 'error' or 'ignore'
        Whether to raise an error or ignore if an unknown categorical feature is
present during transform.
Attributes
----------
active_features_ : array
Indices for active features, meaning values that actually occur
in the training set. Only available when n_values is ``'auto'``.
feature_indices_ : array of shape (n_features,)
Indices to feature ranges.
Feature ``i`` in the original data is mapped to features
from ``feature_indices_[i]`` to ``feature_indices_[i+1]``
(and then potentially masked by `active_features_` afterwards)
n_values_ : array of shape (n_features,)
Maximum number of values per feature.
Examples
--------
Given a dataset with three features and two samples, we let the encoder
find the maximum value per feature and transform the data to a binary
one-hot encoding.
>>> from sklearn.preprocessing import OneHotEncoder
>>> enc = OneHotEncoder()
>>> enc.fit([[0, 0, 3], [1, 1, 0], [0, 2, 1], \
[1, 0, 2]]) # doctest: +ELLIPSIS
OneHotEncoder(categorical_features='all', dtype=<... 'numpy.float64'>,
handle_unknown='error', n_values='auto', sparse=True)
>>> enc.n_values_
array([2, 3, 4])
>>> enc.feature_indices_
array([0, 2, 5, 9])
>>> enc.transform([[0, 1, 1]]).toarray()
array([[ 1., 0., 0., 1., 0., 0., 1., 0., 0.]])
See also
--------
sklearn.feature_extraction.DictVectorizer : performs a one-hot encoding of
dictionary items (also handles string-valued features).
sklearn.feature_extraction.FeatureHasher : performs an approximate one-hot
encoding of dictionary items or strings.
"""
def __init__(self, n_values="auto", categorical_features="all",
dtype=np.float64, sparse=True, handle_unknown='error'):
self.n_values = n_values
self.categorical_features = categorical_features
self.dtype = dtype
self.sparse = sparse
self.handle_unknown = handle_unknown
def fit(self, X, y=None):
"""Fit OneHotEncoder to X.
Parameters
----------
X : array-like, shape [n_samples, n_feature]
Input array of type int.
Returns
-------
self
"""
self.fit_transform(X)
return self
def _fit_transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
if self.n_values == 'auto':
n_values = np.max(X, axis=0) + 1
elif isinstance(self.n_values, numbers.Integral):
if (np.max(X, axis=0) >= self.n_values).any():
raise ValueError("Feature out of bounds for n_values=%d"
% self.n_values)
n_values = np.empty(n_features, dtype=np.int)
n_values.fill(self.n_values)
else:
try:
n_values = np.asarray(self.n_values, dtype=int)
except (ValueError, TypeError):
raise TypeError("Wrong type for parameter `n_values`. Expected"
" 'auto', int or array of ints, got %r"
% type(X))
if n_values.ndim < 1 or n_values.shape[0] != X.shape[1]:
raise ValueError("Shape mismatch: if n_values is an array,"
" it has to be of shape (n_features,).")
self.n_values_ = n_values
n_values = np.hstack([[0], n_values])
indices = np.cumsum(n_values)
self.feature_indices_ = indices
column_indices = (X + indices[:-1]).ravel()
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)
data = np.ones(n_samples * n_features)
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
mask = np.array(out.sum(axis=0)).ravel() != 0
active_features = np.where(mask)[0]
out = out[:, active_features]
self.active_features_ = active_features
return out if self.sparse else out.toarray()
def fit_transform(self, X, y=None):
"""Fit OneHotEncoder to X, then transform X.
Equivalent to self.fit(X).transform(X), but more convenient and more
efficient. See fit for the parameters, transform for the return value.
"""
return _transform_selected(X, self._fit_transform,
self.categorical_features, copy=True)
def _transform(self, X):
"""Assumes X contains only categorical features."""
X = check_array(X, dtype=np.int)
if np.any(X < 0):
raise ValueError("X needs to contain only non-negative integers.")
n_samples, n_features = X.shape
indices = self.feature_indices_
if n_features != indices.shape[0] - 1:
raise ValueError("X has different shape than during fitting."
" Expected %d, got %d."
% (indices.shape[0] - 1, n_features))
        # We use only those categorical features of X that were seen during
        # fit, i.e. values less than n_values_, selected via mask.
        # This means that if self.handle_unknown is "ignore", the row_indices
        # and col_indices corresponding to the unknown categorical features
        # are ignored.
mask = (X < self.n_values_).ravel()
if np.any(~mask):
if self.handle_unknown not in ['error', 'ignore']:
raise ValueError("handle_unknown should be either error or "
"unknown got %s" % self.handle_unknown)
if self.handle_unknown == 'error':
raise ValueError("unknown categorical feature present %s "
"during transform." % X.ravel()[~mask])
column_indices = (X + indices[:-1]).ravel()[mask]
row_indices = np.repeat(np.arange(n_samples, dtype=np.int32),
n_features)[mask]
data = np.ones(np.sum(mask))
out = sparse.coo_matrix((data, (row_indices, column_indices)),
shape=(n_samples, indices[-1]),
dtype=self.dtype).tocsr()
if self.n_values == 'auto':
out = out[:, self.active_features_]
return out if self.sparse else out.toarray()
def transform(self, X):
"""Transform X using one-hot encoding.
Parameters
----------
X : array-like, shape [n_samples, n_features]
Input array of type int.
Returns
-------
X_out : sparse matrix if sparse=True else a 2-d array, dtype=int
Transformed input.
"""
return _transform_selected(X, self._transform,
self.categorical_features, copy=True)
| mit |
ryanvarley/ExoData | exodata/astroclasses.py | 1 | 42589 | """ Contains structural classes (i.e. binary, star, planet, etc.) which mimic the XML
structure with objects
"""
import sys
import math
from pkg_resources import resource_stream
import logging
import numpy as np
import astropy.coordinates
import astropy.units as u
from . import equations as eq
from . import astroquantities as aq
from . import assumptions as assum
from . import flags
from . import params as ed_params
logger = logging.getLogger('')
class _BaseObject(object):
def __init__(self, params=None):
self.children = []
self.parent = False
self.classType = 'BaseObject'
self.flags = flags.Flags()
self.params = {}
if params is not None:
self._updateParams(params) # TODO value validator?
def _addChild(self, child):
self.children.append(child)
def _updateParams(self, params):
""" This method updates parameters allowing for any validation / unit additions in the near future
"""
self.params.update(params)
def _getParentClass(self, startClass, parentClass):
""" gets the parent class by calling successive parent classes with .parent until parentclass is matched.
"""
try:
if not startClass: # reached system with no hits
raise AttributeError
except AttributeError: # i.e calling binary on an object without one
raise HierarchyError('This object ({0}) has no {1} as a parent object'.format(self.name, parentClass))
if startClass.classType == parentClass:
return startClass
else:
return self._getParentClass(startClass.parent, parentClass)
@property
def name(self): # TODO variable for altnames
try:
return self.params['name']
except KeyError:
try:
return self.parent.name
except AttributeError:
return 'Un-named ' + self.classType
except AttributeError:
return 'Un-named ' + self.classType
def __repr__(self):
return '{0}({1!r})'.format(self.classType, self.name)
def getParam(self, paramKey):
""" Fetches a parameter from the params dictionary. If it's not there it will return NaN. This allows the use
of list comprehensions over the entire planet set without KeyErrors.
NaN was used as unlike False and None, NaN < 1 and NaN > 1 are both False
"""
try:
return self.params[paramKey]
except KeyError:
return np.NaN
def __eq__(self, other):
""" check the parameter dictionaries for both clases are the same (and both are of the same class)
"""
if type(self) == type(other):
return self.params == other.params
else:
return False
@property
def system(self):
return self._getParentClass(self.parent, 'System')
class System(_BaseObject):
def __init__(self, *args, **kwargs):
_BaseObject.__init__(self, *args, **kwargs)
self.classType = 'System'
@property
def ra(self):
return self.getParam('rightascension')
@ra.setter
def ra(self, ra):
self.params['rightascension'] = ra
@property
def dec(self):
return self.getParam('declination')
@dec.setter
def dec(self, dec):
self.params['declination'] = dec
@property
def d(self):
return self.getParam('distance')
@d.setter
def d(self, d):
d = d.rescale(aq.pc)
self.params['distance'] = d
@property
def stars(self):
return self.children # TODO child could be a binary or planet
@property
def epoch(self):
return self.getParam('epoch')
@epoch.setter
def epoch(self, epoch):
self.params['epoch'] = epoch
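# Illustrative sketch (assumes the rest of the exodata package is importable;
# the parameter values are hypothetical): parameters live in a plain dict and
# are exposed through properties, with missing values returned as NaN.
# >>> demo_system = System({'name': 'Demo System', 'rightascension': '17 15 18.94'})
# >>> demo_system.ra
# '17 15 18.94'
# >>> demo_system.d    # no 'distance' parameter set -> nan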
class PlanetAndBinaryCommon(_BaseObject):
def __init__(self, *args, **kwargs):
_BaseObject.__init__(self, *args, **kwargs)
self.classType = 'PlanetAndBinaryCommon'
@property
def i(self):
return self.getParam('inclination')
@i.setter
def i(self, i):
i = i.rescale(aq.deg)
self.params['inclination'] = i
@property
def e(self):
return self.getParam('eccentricity')
@e.setter
def e(self, e):
self.params['eccentricity'] = e
@property
def P(self):
period = self.getParam('period')
if period is not np.nan:
return period
elif ed_params.estimateMissingValues:
self.flags.addFlag('Calculated Period')
return self.calcPeriod()
else:
return np.nan
@P.setter
def P(self, P):
P = P.rescale(aq.day)
self.params['period'] = P
def calcPeriod(self):
raise NotImplementedError('Only implemented for Binary and Planet child classes')
@property
def a(self):
sma = self.getParam('semimajoraxis')
if sma is np.nan and ed_params.estimateMissingValues:
if self.getParam('period') is not np.nan:
sma = self.calcSMA() # calc using period
self.flags.addFlag('Calculated SMA')
return sma
else:
return np.nan
else:
return sma
@a.setter
def a(self, a):
a = a.rescale(aq.au)
self.params['semimajoraxis'] = a
def calcSMA(self):
raise NotImplementedError('Only implemented for Binary and Planet child classes')
@property
def transittime(self):
return self.getParam('transittime')
@transittime.setter
def transittime(self, transittime):
self.params['transittime'] = transittime
@property
def periastron(self):
peri = self.getParam('periastron')
if math.isnan(peri) and self.e == 0:
peri = 0 * aq.deg
return peri
@periastron.setter
def periastron(self, periastron):
self.params['periastron'] = periastron
@property
def longitude(self):
return self.getParam('longitude')
@longitude.setter
def longitude(self, longitude):
self.params['longitude'] = longitude
@property
def ascendingnode(self):
return self.getParam('ascendingnode')
@ascendingnode.setter
def ascendingnode(self, ascendingnode):
self.params['ascendingnode'] = ascendingnode
@property
def separation(self):
return self.getParam('separation')
@separation.setter
    def separation(self, separation):
        self.params['separation'] = separation
class StarAndBinaryCommon(_BaseObject):
def __init__(self, *args, **kwargs):
_BaseObject.__init__(self, *args, **kwargs)
self.classType = 'StarAndBinaryCommon'
@property
def magU(self):
return self.getParam('magU')
@magU.setter
def magU(self, mag):
self.params['magU'] = mag
@property
def magB(self):
return self.getParam('magB')
@magB.setter
def magB(self, mag):
self.params['magB'] = mag
@property
def magH(self):
return self.getParam('magH')
@magH.setter
def magH(self, mag):
self.params['magH'] = mag
@property
def magI(self):
return self.getParam('magI')
@magI.setter
def magI(self, mag):
self.params['magI'] = mag
@property
def magJ(self):
return self.getParam('magJ')
@magJ.setter
def magJ(self, mag):
self.params['magJ'] = mag
@property
def magK(self):
return self.getParam('magK')
@magK.setter
def magK(self, mag):
self.params['magK'] = mag
@property
def magV(self):
return self.getParam('magV')
@magV.setter
def magV(self, mag):
self.params['magV'] = mag
@property
def magL(self):
return self.getParam('magL')
@magL.setter
def magL(self, mag):
self.params['magL'] = mag
@property
def magM(self):
return self.getParam('magM')
@magM.setter
def magM(self, mag):
self.params['magM'] = mag
@property
def magN(self):
return self.getParam('magN')
@magN.setter
def magN(self, mag):
self.params['magN'] = mag
class StarAndPlanetCommon(_BaseObject):
def __init__(self, *args, **kwargs):
_BaseObject.__init__(self, *args, **kwargs)
self.classType = 'StarAndPlanetCommon'
@property
def age(self):
return self.getParam('age')
@age.setter
def age(self, age):
age = age.rescale(aq.Gyear)
self.params['age'] = age
@property # allows stars and planets to access system values by propagating up
def ra(self):
return self.parent.ra
@ra.setter
def ra(self, ra):
self.parent.ra = ra
@property
def dec(self):
return self.parent.dec
@dec.setter
def dec(self, dec):
self.parent.dec = dec
@property
def d(self):
return self.parent.d
@d.setter
def d(self, d):
self.parent.d = d
@property
def R(self):
return self.getParam('radius')
@R.setter
def R(self, R):
self.params['radius'] = R
@property
def T(self):
""" Looks for the temperature in the catalogue, if absent it calculates it using calcTemperature()
:return: planet temperature
"""
paramTemp = self.getParam('temperature')
if paramTemp is not np.nan:
return paramTemp
elif ed_params.estimateMissingValues:
self.flags.addFlag('Calculated Temperature')
return self.calcTemperature()
else:
return np.nan
@T.setter
def T(self, T):
T = T.rescale(aq.K)
self.params['temperature'] = T
@property
def M(self):
return self.getParam('mass')
@M.setter
def M(self, M):
M = M.rescale(aq.M_j)
self.params['mass'] = M
def calcTemperature(self):
raise NotImplementedError('Only implemented for Stars and Planet child classes')
@property
def binary(self):
return self._getParentClass(self, 'Binary')
def calcSurfaceGravity(self):
return eq.SurfaceGravity(self.M, self.R).g
def calcLogg(self):
return eq.Logg(self.M, self.R).logg
def calcDensity(self):
if self.M is np.nan or self.R is np.nan:
return np.nan
else:
return eq.Density(self.M, self.R).density
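# Hedged usage sketch (names below are illustrative, not from the source): with mass and
# radius set on a planet- or star-like object, the helpers above simply wrap the equations module, e.g.
#   obj.calcSurfaceGravity()  # -> eq.SurfaceGravity(obj.M, obj.R).g
#   obj.calcDensity()         # -> eq.Density(obj.M, obj.R).density, or np.nan if M or R is missing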
class Binary(PlanetAndBinaryCommon, StarAndBinaryCommon): # TODO add binary methods and variables, remove unused one from starcommon
def __init__(self, *args, **kwargs):
StarAndBinaryCommon.__init__(self, *args, **kwargs)
PlanetAndBinaryCommon.__init__(self, *args, **kwargs)
self.classType = 'Binary'
@property
def stars(self):
return self.children
@property
def d(self):
return self.parent.d
def calcPeriod(self):
raise NotImplementedError # TODO
def calcSMA(self):
raise NotImplementedError # TODO
class Star(StarAndPlanetCommon, StarAndBinaryCommon):
def __init__(self, *args, **kwargs):
StarAndPlanetCommon.__init__(self, *args, **kwargs)
self.classType = 'Star'
@property
def d(self):
""" Note this should work from child parents as .d propergates, calculates using the star estimation method
estimateDistance and estimateAbsoluteMagnitude
"""
# TODO this will only work from a star or below. good thing?
d = self.parent.d
if ed_params.estimateMissingValues:
if d is np.nan:
d = self.estimateDistance()
if d is not np.nan:
self.flags.addFlag('Estimated Distance')
return d
else:
return np.nan
def calcLuminosity(self):
return eq.StellarLuminosity(self.R, self.T).L
def calcTemperature(self):
""" uses equations.starTemperature to estimate temperature based on main sequence relationship
"""
return eq.estimateStellarTemperature(self.M)
def _get_or_convert_magnitude(self, mag_letter):
""" Takes input of the magnitude letter and ouputs the magnitude fetched from the catalogue or a converted value
:return:
"""
allowed_mags = "UBVJIHKLMN"
catalogue_mags = 'BVIJHK'
if mag_letter not in allowed_mags or not len(mag_letter) == 1:
raise ValueError("Magnitude letter must be a single letter in {0}".format(allowed_mags))
mag_str = 'mag'+mag_letter
mag_val = self.getParam(mag_str)
if isNanOrNone(mag_val) and ed_params.estimateMissingValues: # then we need to estimate it!
# old style dict comprehension for python 2.6
mag_dict = dict(('mag'+letter, self.getParam('mag'+letter)) for letter in catalogue_mags)
mag_class = Magnitude(self.spectralType, **mag_dict)
try:
mag_conversion = mag_class.convert(mag_letter)
# logger.debug('Star Class: Conversion to {0} successful, got {1}'.format(mag_str, mag_conversion))
self.flags.addFlag('Estimated mag{0}'.format(mag_letter))
return mag_conversion
except ValueError as e: # can't convert
logger.exception(e)
# logger.debug('Cant convert to {0}'.format(mag_letter))
return np.nan
else:
# logger.debug('returning {0}={1} from catalogue'.format(mag_str, mag_val))
return mag_val
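# Illustrative sketch (assumed, not from the source): for a star with a catalogue V magnitude and a
# parsed spectral type, a missing magnitude such as K is estimated roughly as
#   Magnitude(self.spectralType, magV=self.getParam('magV')).convert('K')
# which is what the mag* properties below delegate to via _get_or_convert_magnitude.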
@property
def magU(self):
return self._get_or_convert_magnitude('U')
@property
def magB(self):
return self._get_or_convert_magnitude('B')
@property
def magV(self):
return self._get_or_convert_magnitude('V')
@property
def magJ(self):
return self._get_or_convert_magnitude('J')
@property
def magI(self):
return self._get_or_convert_magnitude('I')
@property
def magH(self):
return self._get_or_convert_magnitude('H')
@property
def magK(self):
return self._get_or_convert_magnitude('K')
@property
def magL(self):
return self._get_or_convert_magnitude('L')
@property
def magM(self):
return self._get_or_convert_magnitude('M')
@property
def magN(self):
return self._get_or_convert_magnitude('N')
@property
def Z(self):
return self.getParam('metallicity')
@Z.setter
def Z(self, Z):
self.params['metallicity'] = Z
@property
def spectralType(self):
return self.getParam('spectraltype')
@spectralType.setter
def spectralType(self, spectraltype):
self.params['spectraltype'] = spectraltype
@property
def planets(self):
return self.children
def getLimbdarkeningCoeff(self, wavelength=1.22): # TODO replace with pylightcurve
""" Looks up quadratic limb darkening parameter from the star based on T, logg and metalicity.
:param wavelength: microns
:type wavelength: float
:return: limb darkening coefficients 1 and 2
"""
# TODO check this returns the correct value - I'm not certain
# The intervals of values in the tables
tempind = [ 3500., 3750., 4000., 4250., 4500., 4750., 5000., 5250., 5500., 5750., 6000., 6250.,
6500., 6750., 7000., 7250., 7500., 7750., 8000., 8250., 8500., 8750., 9000., 9250.,
9500., 9750., 10000., 10250., 10500., 10750., 11000., 11250., 11500., 11750., 12000., 12250.,
12500., 12750., 13000., 14000., 15000., 16000., 17000., 19000., 20000., 21000., 22000., 23000.,
24000., 25000., 26000., 27000., 28000., 29000., 30000., 31000., 32000., 33000., 34000., 35000.,
36000., 37000., 38000., 39000., 40000., 41000., 42000., 43000., 44000., 45000., 46000., 47000.,
48000., 49000., 50000.]
lggind = [0., 0.5, 1., 1.5, 2., 2.5, 3., 3.5, 4., 4.5, 5.]
mhind = [-5., -4.5, -4., -3.5, -3., -2.5, -2., -1.5, -1., -0.5, -0.3, -0.2, -0.1, 0., 0.1, 0.2, 0.3, 0.5, 1.]
# Choose the values in the table nearest our parameters
tempselect = _findNearest(tempind, float(self.T))
lgselect = _findNearest(lggind, float(self.calcLogg()))
mhselect = _findNearest(mhind, float(self.Z))
quadratic_filepath = resource_stream(__name__, 'data/quadratic.dat')
coeffTable = np.loadtxt(quadratic_filepath)
foundValues = False
for i in range(len(coeffTable)):
if coeffTable[i, 2] == lgselect and coeffTable[i, 3] == tempselect and coeffTable[i, 4] == mhselect:
if coeffTable[i, 0] == 1:
u1array = coeffTable[i, 8:] # Limb darkening parameter u1 for each wl in waveind
u2array = coeffTable[i+1, 8:]
foundValues = True
break
if not foundValues:
raise ValueError('No limb darkening values could be found') # TODO replace with better exception
waveind = [0.365, 0.445, 0.551, 0.658, 0.806, 1.22, 1.63, 2.19, 3.45] # Wavelengths available in table
# Interpolates the value at wavelength from values in the table (waveind)
u1AtWavelength = np.interp(wavelength, waveind, u1array, left=0, right=0)
u2AtWavelength = np.interp(wavelength, waveind, u2array, left=0, right=0)
return u1AtWavelength, u2AtWavelength
def estimateAbsoluteMagnitude(self):
return eq.estimateAbsoluteMagnitude(self.spectralType)
def estimateDistance(self):
# TODO handle other mags than V
if self.magV is not np.nan:
return eq.estimateDistance(self.magV, self.estimateAbsoluteMagnitude())
else:
return np.nan
class Planet(StarAndPlanetCommon, PlanetAndBinaryCommon):
def __init__(self, *args, **kwargs):
StarAndPlanetCommon.__init__(self, *args, **kwargs)
PlanetAndBinaryCommon.__init__(self, *args, **kwargs)
self.classType = 'Planet'
@property
def isTransiting(self):
""" Checks the the istransiting tag to see if the planet transits. Note that this only works as of catalogue
version ee12343381ae4106fd2db908e25ffc537a2ee98c (11th March 2014) where the istransiting tag was implemented
"""
try:
isTransiting = self.params['istransiting']
except KeyError:
return False
return isTransiting == '1'
def calcTransitDuration(self, circular=False):
""" Estimation of the primary transit time assuming a circular orbit (see :py:func:`equations.transitDuration`)
"""
try:
if circular:
return eq.transitDurationCircular(self.P, self.star.R, self.R, self.a, self.i)
else:
return eq.TransitDuration(self.P, self.a, self.R, self.star.R, self.i, self.e, self.periastron).Td
except (ValueError,
AttributeError, # caused by trying to rescale nan i.e. missing i value
HierarchyError): # i.e. planets that dont orbit stars
return np.nan
def calcScaleHeight(self):
raise NotImplementedError
# return eq.scaleHeight(self.T, , self.g) # TODO mu based on assumptions
def calcTransitDepth(self):
return eq.TransitDepth(self.star.R, self.R).depth
def type(self):
return assum.planetType(self.T, self.M, self.R)
def massType(self):
return assum.planetMassType(self.M)
def radiusType(self):
return assum.planetRadiusType(self.R)
def tempType(self):
return assum.planetTempType(self.T)
@property
def mu(self): # TODO make getter look in params first calc if not
molweight = self.getParam('molweight')
if molweight is np.nan: # Use assumptions
if self.M is not np.nan:
return assum.planetMu(self.massType())
elif self.R is not np.nan:
return assum.planetMu(self.radiusType())
else:
return np.nan
else:
return molweight
@mu.setter
def mu(self, mu):
mu = mu.rescale(aq.atomic_mass_unit)
self.params['molweight'] = mu
@property
def albedo(self):
albedo = self.getParam('albedo')
if albedo is not np.nan:
return albedo
elif self.getParam('temperature') is not np.nan:
planetClass = self.tempType()
elif self.M is not np.nan:
planetClass = self.massType()
elif self.R is not np.nan:
planetClass = self.radiusType()
else:
return np.nan
return assum.planetAlbedo(planetClass)
@albedo.setter
def albedo(self, albedo):
self.params['albedo'] = albedo
def calcTemperature(self):
""" Calculates the temperature using which uses equations.MeanPlanetTemp, albedo assumption and potentially
equations.starTemperature.
issues
- you cant get the albedo assumption without temp but you need it to calculate the temp.
"""
try:
return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, self.a).T_p
except (ValueError, HierarchyError): # ie missing value (.a) returning nan
return np.nan
def estimateMass(self):
density = assum.planetDensity(self.radiusType())
return eq.Density(None, self.R, density).M
def calcSMA(self):
""" Calculates the semi-major axis from Keplers Third Law
"""
try:
return eq.KeplersThirdLaw(None, self.star.M, self.P).a
except HierarchyError:
return np.nan
def calcSMAfromT(self, epsilon=0.7):
""" Calculates the semi-major axis based on planet temperature
"""
return eq.MeanPlanetTemp(self.albedo, self.star.T, self.star.R, epsilon, self.T).a
def calcPeriod(self):
""" calculates period using a and stellar mass
"""
return eq.KeplersThirdLaw(self.a, self.star.M).P
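# For reference (standard relation assumed by calcSMA / calcPeriod above):
#   P^2 = 4 * pi^2 * a^3 / (G * M_star)   =>   a = (G * M_star * P^2 / (4 * pi^2))**(1/3)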
@property
def discoveryMethod(self):
return self.getParam('discoverymethod')
@discoveryMethod.setter
def discoveryMethod(self, discoverymethod):
self.params['discoverymethod'] = discoverymethod
@property
def discoveryYear(self):
try:
return int(self.getParam('discoveryyear'))
except ValueError: # np.nan
return self.getParam('discoveryyear')
@discoveryYear.setter
def discoveryYear(self, discoveryYear):
self.params['discoveryyear'] = discoveryYear
@property
def lastUpdate(self):
return self.getParam('lastupdate')
@property
def description(self):
return self.getParam('description')
@property
def star(self):
return self._getParentClass(self.parent, 'Star')
class Parameters(object): # TODO would subclassing dict be preferable?
""" A class to hold parameter dictionaries; the input can be validated, units added and multi-valued
fields handled. In future this may be better as a subclass of dict.
"""
def __init__(self):
self.params = {
'altnames': [],
'list': [],
}
self._defaultUnits = { # this holds quantities with no current or projected ambiguity about their unit
'age': aq.Gyear,
'distance': aq.pc, # TODO more specific unit handling here or in classes?
'magB': 1,
'magH': 1,
'magI': 1,
'magJ': 1,
'magK': 1,
'magV': 1,
'temperature': aq.K,
}
self.rejectTags = ('system', 'binary', 'star', 'planet', 'moon') # These are handled in their own classes
def addParam(self, key, value, attrib=None):
""" Checks the key dosnt already exist, adds alternate names to a seperate list
Future
- format input and add units
- logging
"""
if key in self.rejectTags:
return False # TODO Replace with exception
# Temporary code to handle the separation tag, which can occur several times with different units.
# TODO code a full multi unit solution (github issue #1)
if key == 'separation':
if attrib is None:
return False # reject separations without a unit
try:
if not attrib['unit'] == 'AU':
return False # reject for now
except KeyError: # a separation attribute exists but not one for units
return False
if key in self.params: # if already exists
if key == 'name':
try: # if flagged as a primary or popular name use this one, an option should be made to use either
if attrib['type'] == 'pri': # first names or popular names.
oldname = self.params['name']
self.params['altnames'].append(oldname)
self.params['name'] = value
else:
self.params['altnames'].append(value)
except (KeyError, TypeError): # KeyError = no type key in attrib dict, TypeError = not a dict
self.params['altnames'].append(value)
elif key == 'list':
self.params['list'].append(value)
else:
try:
name = self.params['name']
except KeyError:
name = 'Unnamed'
print('rejected duplicate {0}: {1} in {2}'.format(key, value, name)) # TODO: log rejected value
return False # TODO Replace with exception
else: # If the key doesn't already exist and isn't rejected
# Some tags have no value but an upperlimit in the attributes
if value is None and attrib is not None:
try:
value = attrib['upperlimit']
except KeyError:
try:
value = attrib['lowerlimit']
except KeyError:
return False
if key == 'rightascension':
value = _ra_string_to_unit(value)
elif key == 'declination':
value = _dec_string_to_unit(value)
elif key in self._defaultUnits:
try:
value = float(value) * self._defaultUnits[key]
except (ValueError, TypeError):
print('caught an error with {0} - {1}'.format(key, value))
self.params[key] = value
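# Minimal sketch of Parameters usage (values are made up for illustration):
#   params = Parameters()
#   params.addParam('name', 'Gliese 1214', {'type': 'pri'})   # stored as params.params['name']
#   params.addParam('temperature', '3026')                    # stored as 3026.0 * aq.K
#   params.addParam('name', 'GJ 1214')                        # appended to params.params['altnames']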
class BinaryParameters(Parameters):
def __init__(self):
Parameters.__init__(self)
self._defaultUnits.update({
'separation': aq.au, # TODO there are actually 2 different measurements (the other is arcsec)
'periastron': aq.deg,
})
class StarParameters(Parameters):
def __init__(self):
Parameters.__init__(self)
self._defaultUnits.update({
'mass': aq.M_s,
'metallicity': 1,
'radius': aq.R_s,
})
class PlanetParameters(Parameters):
def __init__(self):
Parameters.__init__(self)
self._defaultUnits.update({
'discoveryyear': 1,
'mass': aq.M_j,
'radius': aq.R_j,
'inclination': aq.deg,
'eccentricity': 1,
'periastron': aq.deg,
'period': aq.day,
'semimajoraxis': aq.au,
'transittime': aq.JD, # TODO specific JD, MJF etc
'molweight': aq.atomic_mass_unit,
'separation': aq.au, # TODO there are actually 2 different measurements (the other is arcsec)
})
def _findNearest(arr, value):
""" Finds the value in arr that value is closest to
"""
arr = np.array(arr)
# find nearest value in array
idx = (abs(arr-value)).argmin()
return arr[idx]
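# Example (illustrative): _findNearest picks the closest grid point,
#   _findNearest([3500., 3750., 4000.], 3600.)  # -> 3500.0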
class SpectralType(object):
""" Takes input of a spectral type as a string and interprets it into the luminosity class and stellar type.
.. usage :
self.lumType = Luminosity Class
self.classLetter = Stellar Class (ie O B A etc)
self.classNumber = Stellar Class number
self.specClass = ie A8V will be A8
self.specType = ie A*V will be A8V (default for calling the class)
self.original = the original string
This class ignores spaces, only considers the first class if given multiple options (ie K0/K1V, GIV/V, F8-G0)
ignores non-typical star classes (ie ) and ignores extra statements like G8 V+
"""
def __init__(self, classString):
self.original = classString
self.lumType = ''
self.classLetter = ''
self.classNumber = ''
self._parseSpecType(classString)
@property
def specClass(self):
""" Spectral class ie A8V is A8 """
return self.classLetter + str(self.classNumber)
@property
def roundedSpecClass(self):
""" Spectral class with rounded class number ie A8.5V is A9 """
try:
classnumber = str(int(np.around(self.classNumber)))
except TypeError:
classnumber = str(self.classNumber)
return self.classLetter + classnumber
@property
def specType(self):
""" Spectral class ie A8V is A8V """
return self.classLetter + str(self.classNumber) + self.lumType
@property
def roundedSpecType(self):
""" Spectral class with rounded class number ie A8.5V is A9V """
return self.roundedSpecClass + self.lumType
def __repr__(self):
return self.specType
def _parseSpecType(self, classString):
""" This class attempts to parse the spectral type. It should probably use more advanced matching use regex
"""
try:
classString = str(classString)
except UnicodeEncodeError:
# This is for the benefit of 1RXS1609 which currently has the spectral type K7\pm 1V
# TODO add unicode support and handling for this case / amend the target
return False
# some initial cases
if classString == '' or classString == 'nan':
return False
possNumbers = range(10)
possLType = ('III', 'II', 'Iab', 'Ia0', 'Ia', 'Ib', 'IV', 'V') # in order of unique matches
# remove spaces, remove slashes
classString = classString.replace(' ', '')
classString = classString.replace('-', '/')
classString = classString.replace('\\', '/')
classString = classString.split('/')[0] # TODO we do not consider slashed classes yet (intemediates)
# check first 3 chars for spectral types
stellarClass = classString[:3]
if stellarClass in _possSpectralClasses:
self.classLetter = stellarClass
elif stellarClass[:2] in _possSpectralClasses: # needed because A5V wouldnt match before
self.classLetter = stellarClass[:2]
elif stellarClass[0] in _possSpectralClasses:
self.classLetter = stellarClass[0]
else:
return False # assume a non standard class and fail
# get number
try:
numIndex = len(self.classLetter)
classNum = int(classString[numIndex])
if classNum in possNumbers:
self.classNumber = int(classNum) # don't consider decimals here, done at the type check
typeString = classString[numIndex+1:]
else:
return False # invalid number received
except IndexError: # reached the end of the string
return True
except ValueError: # i.e. it's a letter - fail # TODO multi letter checking
typeString = classString[1:]
if typeString == '': # i.e. there is no more information, as in 'A8'
return True
# Now check for a decimal and handle those cases
if typeString[0] == '.':
# handle decimal cases, we check each number in turn, add them as strings and then convert to float and add
# to original number
decimalNumbers = '.'
for number in typeString[1:]:
try:
if int(number) in possNumbers:
decimalNumbers += number
else:
print('Something went wrong in decimal checking') # TODO replace with logging
return False # something's gone wrong
except ValueError:
break # received a non-number (probably L class)
# add decimal to classNum
try:
self.classNumber += float(decimalNumbers)
except ValueError: # probably trying to convert '.' to a float
pass
typeString = typeString[len(decimalNumbers):]
if len(typeString) == 0:
return True
# Handle luminosity class
for possL in possLType: # match each possible case in turn (in order of uniqueness)
Lcase = typeString[:len(possL)] # match from front with length to minimise matching say IV in '<3 CIV'
if possL == Lcase:
self.lumType = possL
return True
if not self.classNumber == '':
return True
else: # if there is no number assume we have a name, i.e. 'Catac. var.'
self.classLetter = ''
self.classNumber = ''
self.lumType = ''
return False
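# Illustrative parsing sketch (inputs assumed, not from the source):
#   SpectralType('K0/K1V').specType  # -> 'K0'   (only the first class of a slashed type is kept)
#   SpectralType('G8 V+').specType   # -> 'G8V'  (spaces and trailing '+' are ignored)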
_ExampleSystemCount = 1 # Used by example.py - put here to enable global
# main sequence
_possSingleLetterClasses = ('O', 'B', 'A', 'F', 'G', 'K', 'M',
'L', 'T', 'Y', # dwarfs
'C', 'S',
'W', # Wolf-Rayet
'P', 'Q', # Non-stellar spectral types
)
# skipped carbon stars with dashes ie C-R
_possMultiLetterClasses = ('WNE', 'WNL', 'WCE', 'WCL', 'WO', 'WR', 'WN', 'WC', # Wolf-Rayet stars, WN/C skipped
'MS', 'MC', # intermediary carbon-related classes
'DAB', 'DAO', 'DAZ', 'DBZ', # Extended white dwarf spectral types
'DAV', 'DBV', 'DCV', # Variable star designations, GW Vir (DOV and PNNV) skipped
'DA', 'DB', 'DO', 'DQ', 'DZ', 'DC', 'DX', # white dwarf spectral types
)
_possSpectralClasses = _possMultiLetterClasses + _possSingleLetterClasses # multi first
class Magnitude(object):
""" Holds measured magnitudes and can convert between them given a spectral class.
"""
def __init__(self, spectral_type, magU=None, magB=None, magV=None, magI=None, magJ=None, magH=None, magK=None, magL=None,
magM=None, magN=None):
if isinstance(spectral_type, SpectralType):
self.spectral_type = spectral_type
else:
self.spectral_type = SpectralType(spectral_type)
self.magU = magU
self.magB = magB
self.magV = magV
self.magI = magI
self.magJ = magJ
self.magH = magH
self.magK = magK
self.magL = magL
self.magM = magM
self.magN = magN
# For magDict, these should probably be grouped together
self.column_for_V_conversion = {
# mag column, sign (most are V-Mag (+1), some are Mag-V (-1))
'U': (2, -1),
'B': (3, -1),
'J': (8, +1),
'H': (9, +1),
'K': (10, +1),
'L': (11, +1),
'M': (12, +1),
'N': (13, +1),
}
def convert(self, to_mag, from_mag=None):
""" Converts magnitudes using UBVRIJHKLMNQ photometry in Taurus-Auriga (Kenyon+ 1995)
ReadMe+ftp1995ApJS..101..117K Colors for main-sequence stars
If from_mag isn't specified the program will cycle through provided magnitudes and choose one. Note that all
magnitudes are first converted to V, and then to the requested magnitude.
:param to_mag: magnitude to convert to
:param from_mag: magnitude to convert from
:return:
"""
allowed_mags = "UBVJIHKLMN"
if from_mag:
if to_mag == 'V': # If V mag is requested (1/3) - from mag specified
return self._convert_to_from('V', from_mag)
if from_mag == 'V':
magV = self.magV
else:
magV = self._convert_to_from('V', from_mag)
return self._convert_to_from(to_mag, 'V', magV)
# if no from_mag was given, use the catalogue V magnitude when available
elif not isNanOrNone(self.magV):
if to_mag == 'V': # If V mag is requested (2/3) - no need to convert
return self.magV
else:
return self._convert_to_from(to_mag, 'V', self.magV)
else: # Otherwise lets try all other magnitudes in turn
order = "UBJHKLMN" # V is the intermediate step from the others, done by default if possible
for mag_letter in order:
try:
magV = self._convert_to_from('V', mag_letter)
if to_mag == 'V': # If V mag is requested (3/3) - try all other mags to convert
logging.debug('Converted to magV from {0} got {1}'.format(mag_letter, magV))
return magV
else:
mag_val = self._convert_to_from(to_mag, 'V', magV)
logging.debug('Converted to mag{0} from {1} got {2}'.format(to_mag, mag_letter, mag_val))
return mag_val
except ValueError:
continue # this conversion may not be possible, try another
raise ValueError('Could not convert from any provided magnitudes')
def _convert_to_from(self, to_mag, from_mag, fromVMag=None):
""" Converts from or to V mag using the conversion tables
:param to_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param from_mag: uppercase magnitude letter i.e. 'V' or 'K'
:param fromVMag: MagV if from_mag is 'V'
:return: estimated magnitude for to_mag from from_mag
"""
lumtype = self.spectral_type.lumType
# rounds decimal types, TODO perhaps we should interpolate?
specClass = self.spectral_type.roundedSpecClass
if not specClass: # TODO investigate implications of this
raise ValueError('Can not convert when no spectral class is given')
if lumtype not in ('V', ''):
raise ValueError("Can only convert for main sequence stars. Got {0} type".format(lumtype))
if to_mag == 'V':
col, sign = self.column_for_V_conversion[from_mag]
try: # TODO replace with pandas table
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
from_mag_val = self.__dict__['mag'+from_mag] # safer than eval
if isNanOrNone(from_mag_val):
# logger.debug('2 '+from_mag)
raise ValueError('You cannot convert from a magnitude you have not specified in class')
return from_mag_val + (offset*sign)
elif from_mag == 'V':
if fromVMag is None:
# trying to second guess here could mess up a K->B calculation by using the intermediate measured V. While
# this would probably be preferable it is not what was asked and therefore could give unexpected results
raise ValueError('Must give fromVMag, even if it is self.magV')
col, sign = self.column_for_V_conversion[to_mag]
try:
offset = float(magDict[specClass][col])
except KeyError:
raise ValueError('No data available to convert those magnitudes for that spectral type')
if math.isnan(offset):
raise ValueError('No data available to convert those magnitudes for that spectral type')
else:
return fromVMag + (offset*sign*-1) # -1 as we are now converting the other way
else:
raise ValueError('Can only convert from and to V magnitude. Use .convert() instead')
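# Hedged sketch of the intended conversion flow (magnitudes are illustrative):
#   mags = Magnitude('G2V', magV=4.83)
#   mags.convert('K')        # V -> K via the colour table loaded below
#   mags.convert('K', 'V')   # explicit from_mag; V is always the intermediate step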
def _createMagConversionDict():
""" loads magnitude_conversion.dat which is table A% 1995ApJS..101..117K
"""
magnitude_conversion_filepath = resource_stream(__name__, 'data/magnitude_conversion.dat')
raw_table = np.loadtxt(magnitude_conversion_filepath, '|S5')
magDict = {}
for row in raw_table:
if sys.hexversion >= 0x03000000:
starClass = row[1].decode("utf-8") # otherwise we get byte ints or b' caused by 2to3
tableData = [x.decode("utf-8") for x in row[3:]]
else:
starClass = row[1]
tableData = row[3:]
magDict[starClass] = tableData
return magDict
magDict = _createMagConversionDict()
def isNanOrNone(val):
""" Tests if val is float('nan') or None using math.isnan and is None. Needed as isnan fails if a non float is given.
:param val:
:return:
"""
if val is None:
return True
else:
try:
return math.isnan(val)
except TypeError: # not a float
return False
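# Examples (illustrative):
#   isNanOrNone(None)          # -> True
#   isNanOrNone(float('nan'))  # -> True
#   isNanOrNone('K5V')         # -> False (a non-float is simply not nan)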
def _ra_string_to_unit(ra_string):
ra_split = ra_string.split(' ')
hour, minute, sec = ra_split
ra_astropy_format = '{}h{}m{}s'.format(hour, minute, sec)
ra_unit = astropy.coordinates.Longitude(ra_astropy_format, unit=u.deg)
return ra_unit
def _dec_string_to_unit(dec_string):
deg_split = dec_string.split(' ')
deg, arcmin, arcsec = deg_split
deg_astropy_format = '{}d{}m{}s'.format(deg, arcmin, arcsec)
dec_unit = astropy.coordinates.Latitude(deg_astropy_format, unit=u.deg)
return dec_unit
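# Sketch of the expected catalogue coordinate format (assumed space-separated sexagesimal strings):
#   _ra_string_to_unit('17 15 18.94')   # -> astropy Longitude for 17h15m18.94s
#   _dec_string_to_unit('-04 57 50.1')  # -> astropy Latitude for -04d57m50.1s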
class HierarchyError(ed_params.ExoDataError):
pass
| mit |
nathania/networkx | examples/algorithms/blockmodel.py | 32 | 3009 | #!/usr/bin/env python
# encoding: utf-8
"""
Example of creating a block model using the blockmodel function in NX. Data used is the Hartford, CT drug users network:
@article{,
title = {Social Networks of Drug Users in {High-Risk} Sites: Finding the Connections},
volume = {6},
shorttitle = {Social Networks of Drug Users in {High-Risk} Sites},
url = {http://dx.doi.org/10.1023/A:1015457400897},
doi = {10.1023/A:1015457400897},
number = {2},
journal = {{AIDS} and Behavior},
author = {Margaret R. Weeks and Scott Clair and Stephen P. Borgatti and Kim Radda and Jean J. Schensul},
month = jun,
year = {2002},
pages = {193--206}
}
"""
__author__ = """\n""".join(['Drew Conway <drew.conway@nyu.edu>',
'Aric Hagberg <hagberg@lanl.gov>'])
from collections import defaultdict
import networkx as nx
import numpy
from scipy.cluster import hierarchy
from scipy.spatial import distance
import matplotlib.pyplot as plt
def create_hc(G):
"""Creates hierarchical cluster of graph G from distance matrix"""
path_length=nx.all_pairs_shortest_path_length(G)
distances=numpy.zeros((len(G),len(G)))
for u,p in path_length.items():
for v,d in p.items():
distances[u][v]=d
# Create hierarchical cluster
Y=distance.squareform(distances)
Z=hierarchy.complete(Y) # Creates HC using farthest point linkage
# This partition selection is arbitrary, for illustrative purposes
membership=list(hierarchy.fcluster(Z,t=1.15))
# Create collection of lists for blockmodel
partition=defaultdict(list)
for n,p in zip(list(range(len(G))),membership):
partition[p].append(n)
return list(partition.values())
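# Hedged sketch (node ids are illustrative): for a graph with nodes 0..n-1, create_hc returns a
# partition such as [[0, 3, 7], [1, 2], [4, 5, 6]], i.e. a list of node lists suitable for
# nx.blockmodel(H, partitions) below.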
if __name__ == '__main__':
G=nx.read_edgelist("hartford_drug.edgelist")
# Extract largest connected component into graph H
H=nx.connected_component_subgraphs(G)[0]
# Makes life easier to have consecutively labeled integer nodes
H=nx.convert_node_labels_to_integers(H)
# Create parititions with hierarchical clustering
partitions=create_hc(H)
# Build blockmodel graph
BM=nx.blockmodel(H,partitions)
# Draw original graph
pos=nx.spring_layout(H,iterations=100)
fig=plt.figure(1,figsize=(6,10))
ax=fig.add_subplot(211)
nx.draw(H,pos,with_labels=False,node_size=10)
plt.xlim(0,1)
plt.ylim(0,1)
# Draw block model with weighted edges and nodes sized by number of internal nodes
node_size=[BM.node[x]['nnodes']*10 for x in BM.nodes()]
edge_width=[(2*d['weight']) for (u,v,d) in BM.edges(data=True)]
# Set positions to mean of positions of internal nodes from original graph
posBM={}
for n in BM:
xy=numpy.array([pos[u] for u in BM.node[n]['graph']])
posBM[n]=xy.mean(axis=0)
ax=fig.add_subplot(212)
nx.draw(BM,posBM,node_size=node_size,width=edge_width,with_labels=False)
plt.xlim(0,1)
plt.ylim(0,1)
plt.axis('off')
plt.savefig('hartford_drug_block_model.png')
| bsd-3-clause |
dcherian/tools | ROMS/pmacc/tools/post_tools/rompy/tags/rompy-0.1.6/test.py | 4 | 8114 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from rompy import rompy, plot_utils, utils
map1 = False
map2 = False
map3 = False
map4 = False
map5 = False
map6 = False
map7 = False
map8 = False
map9 = False
map10 = False
map11 = False
map1 = True
# map2 = True
# map3 = True
# map4 = True
# map5 = True
# map6 = True
# map7 = True
# map8 = True
# map9 = True
# map10 = True
# map11 = True
if map1:
print('map1')
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='h')
plot_utils.plot_surface(coords['xm'],coords['ym'],data)
plot_utils.plot_map(coords['xm'],coords['ym'],data,filename='/Users/lederer/tmp/rompy.map.png')
del(data)
del(coords)
if map2:
print('map2')
# full domain
#x = np.linspace(-127.,-122.,100)
#y = np.linspace(44.,50.,100)
#puget sound area
#x = np.linspace(-123.,-122.,500)
#y = np.linspace(47.,48.,500)
# hood canal
x = np.linspace(-123.25,-122.5,400)
y = np.linspace(47.33,48.0,400)
(data, coords) = rompy.extract('ocean_his_0001.nc', varname='zeta',extraction_type='points', x=x, y=y)
plot_utils.plot_map(coords['xm'],coords['ym'],data,filename='/Users/lederer/tmp/rompy.map2.png',resolution='h')
# plot_utils.plot_surface(coords['xm'],coords['ym'],data,filename='/Users/lederer/tmp/rompy.map2.png')
if map3:
print('map3')
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='v',extraction_type='full')
print(data.shape)
for key in coords:
print(key, coords[key].shape)
plot_utils.plot_profile(data[:,20,20],coords['zm'][:,20,20],filename='/Users/lederer/tmp/rompy.profile.png')
if map4:
print('map4')
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='salt',extraction_type='surface')
plot_utils.plot_map(coords['xm'],coords['ym'],data,filename='/Users/lederer/tmp/rompy.map4.png',resolution='h')
if map5:
print('map5')
# middle of pacific
# x = np.linspace(-126.0,-125.0,1001)
# y = np.linspace(45.0,46.0,1001)
# hood canal PRISM Cruise February 2009
x,y = utils.hood_canal_xy()
#cs = np.linspace(-0.96103753,-0.00143376,10)
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='salt',extraction_type='profile',x=x,y=y)#,cs=cs)
fig = Figure(facecolor='white')
ax = fig.add_subplot(111)
# my_plot = ax.pcolormesh(np.arange(data.shape[1]),coords['zm'],data,clim=(0,35),colorbar=True)
my_plot = ax.contourf(np.tile(np.arange(data.shape[1]),(coords['zm'].shape[0],1)),coords['zm'],data,100)
my_plot2 = ax.contour(np.tile(np.arange(data.shape[1]),(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None)
ax.fill_between(np.arange(data.shape[1]),coords['zm'][0,:],ax.get_ylim()[0],color='grey')
fig.colorbar(my_plot,ax=ax)
ax.set_title('Hood Canal Salinity from a ROMS run')
ax.set_ylabel('depth in meters')
ax.set_xticks(np.arange(data.shape[1]))
ax.set_xticklabels(utils.hood_canal_station_list())
ax.set_xlabel('station ID')
FigureCanvas(fig).print_png('/Users/lederer/tmp/rompy.map5.png')
if map6:
print('map6')
# middle of pacific
# x = np.linspace(-126.0,-125.0,1001)
# y = np.linspace(45.0,46.0,1001)
# hood canal PRISM Cruise February 2009
x,y = utils.main_basin_xy()
#cs = np.linspace(-0.96103753,-0.00143376,10)
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='salt',extraction_type='profile',x=x,y=y)#,cs=cs)
fig = Figure(facecolor='white')
ax = fig.add_subplot(111)
# my_plot = ax.pcolormesh(np.arange(data.shape[1]),coords['zm'],data,clim=(0,35),colorbar=True)
my_plot = ax.contourf(np.tile(np.arange(data.shape[1]),(coords['zm'].shape[0],1)),coords['zm'],data,100)
my_plot2 = ax.contour(np.tile(np.arange(data.shape[1]),(coords['zm'].shape[0],1)),coords['zm'],data,100,linewidths=1,linestyle=None)
ax.fill_between(np.arange(data.shape[1]),coords['zm'][0,:],ax.get_ylim()[0],color='grey')
fig.colorbar(my_plot,ax=ax)
ax.set_title('Main Basin Salinity from a ROMS run')
ax.set_ylabel('depth in meters')
ax.set_xticks(np.arange(data.shape[1]))
ax.set_xticklabels(utils.main_basin_station_list())
ax.set_xlabel('station ID')
FigureCanvas(fig).print_png('/Users/lederer/tmp/rompy.map6.png')
if map7: # Main Basin
print('map7')
n = 10
x,y = utils.high_res_main_basin_xy(n=n)
# Salinity
(data, coords) = rompy.extract('ocean_his_0001.nc', varname='salt', extraction_type='profile', x=x, y=y)
plot_utils.plot_mickett(coords=coords, data=data, varname='Salinity', region='Main Basin', filename='/Users/lederer/tmp/rompy.mickett_main_salt.png', n=n, x_axis_offset=utils.offset_region(coords), clim=[0,20,32,32], cmap='banas_hsv_cm', labeled_contour_gap=2)
# Temperature
(data, coords) = rompy.extract('ocean_his_0001.nc',varname='temp',extraction_type='profile',x=x,y=y)
plot_utils.plot_mickett(coords=coords, data=data, varname='Temperature', region='Main Basin', filename='/Users/lederer/tmp/rompy.mickett_main_temp.png', n=n, x_axis_offset=utils.offset_region(coords), clim=[0,20], cmap='banas_hsv_cm', labeled_contour_gap=2)
if map8: # Hood Canal
print('map8')
n=10
x,y = utils.high_res_hood_canal_xy(n=n)
# Salinity
(data, coords) = rompy.extract('ocean_his_0001.nc', varname='salt', extraction_type='profile', x=x, y=y)
plot_utils.plot_mickett(coords=coords, data=data, varname='Salinity', region='Hood Canal', filename='/Users/lederer/tmp/rompy.mickett_hood_salt.png', n=n, x_axis_offset=utils.offset_region(coords), clim=[0,20,32,32], cmap='banas_hsv_cm')
# Temperature
(data, coords) = rompy.extract('ocean_his_0001.nc', varname='temp', extraction_type='profile', x=x, y=y)
plot_utils.plot_mickett(coords=coords, data=data, varname='Temperature', region='Hood Canal', filename='/Users/lederer/tmp/rompy.mickett_hood_temp.png', n=n, x_axis_offset=utils.offset_region(coords), clim=[0,20], cmap='banas_hsv_cm')
if map9: # velocity in Hood Canal
print('map9')
n=20
x,y = utils.high_res_hood_canal_xy(n=n)
(u, coords) = rompy.extract('ocean_his_0001.nc',varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract('ocean_his_0001.nc',varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
plot_utils.plot_mickett(coords=coords,data=data,varname='U', region='Hood Canal', filename='/Users/lederer/tmp/rompy.mickett_hood_U.png', n=n, clim=[-2,2], x_axis_offset=utils.offset_region(coords),cmap='red_blue')
if map10: # velocity in Main Basin
print('map10')
n=3
x,y = utils.high_res_main_basin_xy(n=n)
(u, coords) = rompy.extract('ocean_his_0001.nc',varname='u',extraction_type='profile',x=x,y=y)
(v, coords) = rompy.extract('ocean_his_0001.nc',varname='v',extraction_type='profile',x=x,y=y)
data = np.zeros(u.shape)
for i in range(u.shape[1]):
if i == u.shape[1]-1:
x_vec = np.array([x[i] - x[i-1], y[i] - y[i-1]])
else:
x_vec = np.array([x[i+1] - x[i], y[i+1] - y[i]])
for j in range(u.shape[0]):
u_vec = np.array([u[j,i], v[j,i]])
data[j,i] = np.dot(x_vec,u_vec)/(np.sqrt(np.dot(x_vec,x_vec)))
data = np.ma.array(data, mask=np.abs(data) > 100)
plot_utils.plot_mickett(coords=coords,data=data,varname='U', region=' Main Basin', filename='/Users/lederer/tmp/rompy.mickett_main_U.png', n=n, clim=[-2,2], x_axis_offset=utils.offset_region(coords),cmap='red_blue')
if map11:
print('map11')
n = 5
x,y = utils.high_res_hood_canal_xy(n=n)
# x,y = utils.high_res_main_basin_xy(n=n)
(data, coords) = rompy.extract('ocean_his_0001.nc', varname='salt', extraction_type='profile', x=x, y=y)
plot_utils.plot_parker(coords=coords, data=data, varname='Salinity', region='Hood Canal', filename='/Users/lederer/tmp/rompy.parker_hood_salt.png', n=n, x_axis_offset=utils.offset_region(coords), clim=[0,20,32,32], cmap='banas_hsv_cm')
| mit |
schreiberx/sweet | benchmarks_sphere/rexi_mass_energy_galewsky_martinium/pp_plot_csv.py | 2 | 2918 | #! /usr/bin/python2
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import numpy as np
import sys
first = True
s = 2e-5
eta_contour_levels = np.append(np.arange(-1e-4, 0, s), np.arange(s, 1e-4, s))
zoom_lat = True
zoom_lat = False
zoom_lat = 'eta' in sys.argv[1]
fontsize=8
figsize=(9, 3)
filename = sys.argv[1]
ref_filename = sys.argv[2]
if True:
print(filename)
data = np.loadtxt(filename, skiprows=3)
labelsx = data[0,1:]
labelsy = data[1:,0]
data = data[1:,1:]
if np.isnan(data).any():
print("Skipping "+filename+" because of NaN")
sys.exit(1)
if zoom_lat:
while labelsy[1] < 10:
labelsy = labelsy[1:]
data = data[1:]
while labelsy[-2] > 80:
labelsy = labelsy[0:-2]
data = data[0:-2]
# while labelsx[1] < 90:
# tmplabelsx = labelsx[0]
# labelsx[0:-1] = labelsx[1:]
# labelsx[-1] = tmplabelsx
#
# tmpdata = data[:,0]
# data[:,0:-1] = data[:,1:]
# data[:,-1] = tmpdata
# Reference
if True:
refdata = np.loadtxt(ref_filename, skiprows=3)
refdata = refdata[1:,1:]
if zoom_lat:
while labelsy[1] < 10:
labelsy = labelsy[1:]
refdata = refdata[1:]
while labelsy[-2] > 80:
labelsy = labelsy[0:-2]
refdata = refdata[0:-2]
if first:
lon_min = labelsx[0]
lon_max = labelsx[-1]
lat_min = labelsy[0]
lat_max = labelsy[-1]
new_labelsx = np.linspace(lon_min, lon_max, 7)
new_labelsy = np.linspace(lat_min, lat_max, 7)
labelsx = np.interp(new_labelsx, labelsx, labelsx)
labelsy = np.interp(new_labelsy, labelsy, labelsy)
if first:
cmin = np.amin(data)
cmax = np.amax(data)
if 'eta' in filename:
cmin *= 1.2
cmax *= 1.2
extent = (labelsx[0], labelsx[-1], labelsy[0], labelsy[-1])
plt.figure(figsize=figsize)
plt.imshow(data, interpolation='nearest', extent=extent, origin='lower', aspect='auto')
plt.clim(cmin, cmax)
cbar = plt.colorbar()
cbar.ax.tick_params(labelsize=fontsize)
plt.title(filename, fontsize=fontsize)
if 'eta' in filename:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=eta_contour_levels, linewidths=0.5)
plt.contour(refdata, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, levels=eta_contour_levels, linewidths=0.5, linestyles='dashed')
else:
if cmin != cmax:
plt.contour(data, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5)
plt.contour(refdata, colors="black", origin='lower', extent=extent, vmin=cmin, vmax=cmax, linewidths=0.5, linestyles='dashed')
ax = plt.gca()
ax.xaxis.set_label_coords(0.5, -0.075)
plt.xticks(labelsx, fontsize=fontsize)
plt.xlabel("Longitude", fontsize=fontsize)
plt.yticks(labelsy, fontsize=fontsize)
plt.ylabel("Latitude", fontsize=fontsize)
#plt.show()
outfilename = filename.replace('.csv', '.png')
print(outfilename)
plt.savefig(outfilename, dpi=200)
plt.close()
first = False
| mit |
astroumd/GradMap | notebooks/check_imports/draw_dolphins.py | 1 | 3779 | """
Draws dolphins using matplotlib features.
From matplotlib documentation:
https://matplotlib.org/gallery/shapes_and_collections/dolphin.html#sphx-glr-gallery-shapes-and-collections-dolphin-py
"""
# Fixing random state for reproducibility
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.patches import Circle, PathPatch
from matplotlib.path import Path
from matplotlib.transforms import Affine2D
import numpy as np
np.random.seed(19680801)
r = np.random.rand(50)
t = np.random.rand(50) * np.pi * 2.0
x = r * np.cos(t)
y = r * np.sin(t)
fig, ax = plt.subplots(figsize=(6, 6))
circle = Circle((0, 0), 1, facecolor='none',
edgecolor=(0, 0.8, 0.8), linewidth=3, alpha=0.5)
ax.add_patch(circle)
im = plt.imshow(np.random.random((100, 100)),
origin='lower', cmap=cm.winter,
interpolation='spline36',
extent=([-1, 1, -1, 1]))
im.set_clip_path(circle)
plt.plot(x, y, 'o', color=(0.9, 0.9, 1.0), alpha=0.8)
# Dolphin from OpenClipart library by Andy Fitzsimon
# <cc:License rdf:about="http://web.resource.org/cc/PublicDomain">
# <cc:permits rdf:resource="http://web.resource.org/cc/Reproduction"/>
# <cc:permits rdf:resource="http://web.resource.org/cc/Distribution"/>
# <cc:permits rdf:resource="http://web.resource.org/cc/DerivativeWorks"/>
# </cc:License>
dolphin = """
M -0.59739425,160.18173 C -0.62740401,160.18885 -0.57867129,160.11183
-0.57867129,160.11183 C -0.57867129,160.11183 -0.5438361,159.89315
-0.39514638,159.81496 C -0.24645668,159.73678 -0.18316813,159.71981
-0.18316813,159.71981 C -0.18316813,159.71981 -0.10322971,159.58124
-0.057804323,159.58725 C -0.029723983,159.58913 -0.061841603,159.60356
-0.071265813,159.62815 C -0.080250183,159.65325 -0.082918513,159.70554
-0.061841203,159.71248 C -0.040763903,159.7194 -0.0066711426,159.71091
0.077336307,159.73612 C 0.16879567,159.76377 0.28380306,159.86448
0.31516668,159.91533 C 0.3465303,159.96618 0.5011127,160.1771
0.5011127,160.1771 C 0.63668998,160.19238 0.67763022,160.31259
0.66556395,160.32668 C 0.65339985,160.34212 0.66350443,160.33642
0.64907098,160.33088 C 0.63463742,160.32533 0.61309688,160.297
0.5789627,160.29339 C 0.54348657,160.28968 0.52329693,160.27674
0.50728856,160.27737 C 0.49060916,160.27795 0.48965803,160.31565
0.46114204,160.33673 C 0.43329696,160.35786 0.4570711,160.39871
0.43309565,160.40685 C 0.4105108,160.41442 0.39416631,160.33027
0.3954995,160.2935 C 0.39683269,160.25672 0.43807996,160.21522
0.44567915,160.19734 C 0.45327833,160.17946 0.27946869,159.9424
-0.061852613,159.99845 C -0.083965233,160.0427 -0.26176109,160.06683
-0.26176109,160.06683 C -0.30127962,160.07028 -0.21167141,160.09731
-0.24649368,160.1011 C -0.32642366,160.11569 -0.34521187,160.06895
-0.40622293,160.0819 C -0.467234,160.09485 -0.56738444,160.17461
-0.59739425,160.18173
"""
vertices = []
codes = []
parts = dolphin.split()
i = 0
code_map = {
'M': Path.MOVETO,
'C': Path.CURVE4,
'L': Path.LINETO,
}
while i < len(parts):
path_code = code_map[parts[i]]
npoints = Path.NUM_VERTICES_FOR_CODE[path_code]
codes.extend([path_code] * npoints)
vertices.extend([[*map(float, y.split(','))]
for y in parts[i + 1:][:npoints]])
i += npoints + 1
vertices = np.array(vertices)
vertices[:, 1] -= 160
dolphin_path = Path(vertices, codes)
dolphin_patch = PathPatch(dolphin_path, facecolor=(0.6, 0.6, 0.6),
edgecolor=(0.0, 0.0, 0.0))
ax.add_patch(dolphin_patch)
vertices = Affine2D().rotate_deg(60).transform(vertices)
dolphin_path2 = Path(vertices, codes)
dolphin_patch2 = PathPatch(dolphin_path2, facecolor=(0.5, 0.5, 0.5),
edgecolor=(0.0, 0.0, 0.0))
ax.add_patch(dolphin_patch2)
plt.show()
| gpl-3.0 |
Denisolt/Tensorflow_Chat_Bot | local/lib/python2.7/site-packages/tensorflow/contrib/learn/python/learn/dataframe/queues/feeding_functions.py | 12 | 12480 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Helper functions for enqueuing data from arrays and pandas `DataFrame`s."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import random
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_queue_runner as fqr
from tensorflow.python import summary
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training import queue_runner
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
class _ArrayFeedFn(object):
"""Creates feed dictionaries from numpy arrays."""
def __init__(self,
placeholders,
array,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != 2:
raise ValueError("_array_feed_fn expects 2 placeholders; got {}.".format(
len(placeholders)))
self._placeholders = placeholders
self._array = array
self._max = len(array)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
return {self._placeholders[0]: integer_indexes,
self._placeholders[1]: self._array[integer_indexes]}
class _OrderedDictNumpyFeedFn(object):
"""Creates feed dictionaries from `OrderedDict`s of numpy arrays."""
def __init__(self,
placeholders,
ordered_dict_of_arrays,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(ordered_dict_of_arrays) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(ordered_dict_of_arrays), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._ordered_dict_of_arrays = ordered_dict_of_arrays
self._max = len(ordered_dict_of_arrays.values()[0])
for _, v in ordered_dict_of_arrays.items():
if len(v) != self._max:
raise ValueError("Array lengths must match.")
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
self._trav = (integer_indexes[-1] + 1) % self._max
feed_dict = {self._index_placeholder: integer_indexes}
cols = [column[integer_indexes]
for column in self._ordered_dict_of_arrays.values()]
feed_dict.update(dict(zip(self._col_placeholders, cols)))
return feed_dict
class _PandasFeedFn(object):
"""Creates feed dictionaries from pandas `DataFrames`."""
def __init__(self,
placeholders,
dataframe,
batch_size,
random_start=False,
seed=None,
num_epochs=None):
if len(placeholders) != len(dataframe.columns) + 1:
raise ValueError("Expected {} placeholders; got {}.".format(
len(dataframe.columns), len(placeholders)))
self._index_placeholder = placeholders[0]
self._col_placeholders = placeholders[1:]
self._dataframe = dataframe
self._max = len(dataframe)
self._batch_size = batch_size
self._num_epochs = num_epochs
self._epoch = 0
random.seed(seed)
self._trav = random.randrange(self._max) if random_start else 0
self._epoch_end = (self._trav - 1) % self._max
def __call__(self):
if self._num_epochs and self._epoch >= self._num_epochs:
raise errors.OutOfRangeError(None, None,
"Already emitted %s epochs." % self._epoch)
integer_indexes = [j % self._max
for j in range(self._trav, self._trav + self._batch_size)
]
if self._epoch_end in integer_indexes:
# after this batch we will have processed self._epoch epochs, possibly
# overshooting a bit to fill out a batch.
self._epoch += 1
if self._epoch == self._num_epochs:
# trim this batch, so as not to overshoot the last epoch.
batch_end_inclusive = integer_indexes.index(self._epoch_end)
integer_indexes = integer_indexes[:(batch_end_inclusive+1)]
self._trav = (integer_indexes[-1] + 1) % self._max
result = self._dataframe.iloc[integer_indexes]
cols = [result[col].values for col in result.columns]
feed_dict = dict(zip(self._col_placeholders, cols))
feed_dict[self._index_placeholder] = result.index.values
return feed_dict
def enqueue_data(data,
capacity,
shuffle=False,
min_after_dequeue=None,
num_threads=1,
seed=None,
name="enqueue_input",
enqueue_size=1,
num_epochs=None):
"""Creates a queue filled from a numpy array or pandas `DataFrame`.
Returns a queue filled with the rows of the given array or `DataFrame`. In
the case of a pandas `DataFrame`, the first enqueued `Tensor` corresponds to
the index of the `DataFrame`. For numpy arrays, the first enqueued `Tensor`
contains the row number.
Args:
data: a numpy `ndarray or` pandas `DataFrame` that will be read into the
queue.
capacity: the capacity of the queue.
shuffle: whether or not to shuffle the rows of the array.
min_after_dequeue: minimum number of elements that can remain in the queue
after a dequeue operation. Only used when `shuffle` is true. If not set,
defaults to `capacity` / 4.
num_threads: number of threads used for reading and enqueueing.
seed: used to seed shuffling and reader starting points.
name: a scope name identifying the data.
enqueue_size: the number of rows to enqueue per step.
num_epochs: limit enqueuing to a specified number of epochs, if provided.
Returns:
A queue filled with the rows of the given array or `DataFrame`.
Raises:
TypeError: `data` is not a Pandas `DataFrame` or a numpy `ndarray`.
"""
with ops.name_scope(name):
if isinstance(data, np.ndarray):
types = [dtypes.int64, dtypes.as_dtype(data.dtype)]
queue_shapes = [(), data.shape[1:]]
get_feed_fn = _ArrayFeedFn
elif isinstance(data, collections.OrderedDict):
types = [dtypes.int64] + [dtypes.as_dtype(col.dtype)
for col in data.values()]
queue_shapes = [()] + [col.shape[1:] for col in data.values()]
get_feed_fn = _OrderedDictNumpyFeedFn
elif HAS_PANDAS and isinstance(data, pd.DataFrame):
types = [dtypes.as_dtype(dt)
for dt in [data.index.dtype] + list(data.dtypes)]
queue_shapes = [() for _ in types]
get_feed_fn = _PandasFeedFn
else:
raise TypeError(
"data must be either a numpy array or pandas DataFrame if pandas is "
"installed; got {}".format(type(data).__name__))
# TODO(jamieas): TensorBoard warnings for all warnings below once available.
if num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with num_epochs and num_threads > 1. "
"num_epochs is applied per thread, so this will produce more "
"epochs than you probably intend. "
"If you want to limit epochs, use one thread.")
if shuffle and num_threads > 1 and num_epochs is not None:
logging.warning(
"enqueue_data was called with shuffle=True, num_threads > 1, and "
"num_epochs. This will create multiple threads, all reading the "
"array/dataframe in order adding to the same shuffling queue; the "
"results will likely not be sufficiently shuffled.")
if not shuffle and num_threads > 1:
logging.warning(
"enqueue_data was called with shuffle=False and num_threads > 1. "
"This will create multiple threads, all reading the "
"array/dataframe in order. If you want examples read in order, use"
" one thread; if you want multiple threads, enable shuffling.")
if shuffle:
min_after_dequeue = int(capacity / 4 if min_after_dequeue is None else
min_after_dequeue)
queue = data_flow_ops.RandomShuffleQueue(capacity,
min_after_dequeue,
dtypes=types,
shapes=queue_shapes,
seed=seed)
else:
min_after_dequeue = 0 # just for the summary text
queue = data_flow_ops.FIFOQueue(capacity,
dtypes=types,
shapes=queue_shapes)
enqueue_ops = []
feed_fns = []
for i in range(num_threads):
# Note the placeholders have no shapes, so they will accept any
# enqueue_size. enqueue_many below will break them up.
placeholders = [array_ops.placeholder(t) for t in types]
enqueue_ops.append(queue.enqueue_many(placeholders))
seed_i = None if seed is None else (i + 1) * seed
feed_fns.append(get_feed_fn(placeholders,
data,
enqueue_size,
random_start=shuffle,
seed=seed_i,
num_epochs=num_epochs))
runner = fqr.FeedingQueueRunner(queue=queue,
enqueue_ops=enqueue_ops,
feed_fns=feed_fns)
queue_runner.add_queue_runner(runner)
full = (math_ops.cast(
math_ops.maximum(0, queue.size() - min_after_dequeue),
dtypes.float32) * (1. / (capacity - min_after_dequeue)))
# Note that name contains a '/' at the end so we intentionally do not place
# a '/' after %s below.
summary_name = ("queue/%sfraction_over_%d_of_%d_full" %
(queue.name, min_after_dequeue,
capacity - min_after_dequeue))
summary.scalar(summary_name, full)
return queue
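# Hedged usage sketch (array and sizes are illustrative, not from the source):
#   data = np.arange(10).reshape(5, 2)
#   queue = enqueue_data(data, capacity=32, shuffle=True, seed=0)
#   index, rows = queue.dequeue_many(4)  # the first tensor holds the row indices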
| gpl-3.0 |
0asa/scikit-learn | examples/neighbors/plot_digits_kde_sampling.py | 251 | 2022 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.grid_search import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| bsd-3-clause |
chrsrds/scikit-learn | examples/neighbors/plot_nca_illustration.py | 1 | 2974 | """
=============================================
Neighborhood Components Analysis Illustration
=============================================
An example illustrating the goal of learning a distance metric that maximizes
the nearest neighbors classification accuracy. The example is solely for
illustration purposes. Please refer to the :ref:`User Guide <nca>` for
more information.
"""
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.neighbors import NeighborhoodComponentsAnalysis
from matplotlib import cm
from sklearn.utils.fixes import logsumexp
print(__doc__)
n_neighbors = 1
random_state = 0
# Create a tiny data set of 9 samples from 3 classes
X, y = make_classification(n_samples=9, n_features=2, n_informative=2,
n_redundant=0, n_classes=3, n_clusters_per_class=1,
class_sep=1.0, random_state=random_state)
# Plot the points in the original space
plt.figure()
ax = plt.gca()
# Draw the graph nodes
for i in range(X.shape[0]):
ax.text(X[i, 0], X[i, 1], str(i), va='center', ha='center')
ax.scatter(X[i, 0], X[i, 1], s=300, c=cm.Set1(y[[i]]), alpha=0.4)
def p_i(X, i):
diff_embedded = X[i] - X
dist_embedded = np.einsum('ij,ij->i', diff_embedded,
diff_embedded)
dist_embedded[i] = np.inf
    # compute exponentiated distances (use the log-sum-exp trick to
    # avoid numerical instabilities)
exp_dist_embedded = np.exp(-dist_embedded -
logsumexp(-dist_embedded))
return exp_dist_embedded
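# Added note (not in the original example): p_i returns the NCA softmax over distances
# for sample i, i.e.
#     p_ij = exp(-||x_i - x_j||^2) / sum_{k != i} exp(-||x_i - x_k||^2),   with p_ii = 0,
# which the logsumexp form above evaluates in a numerically stable way; these values
# are used below as link thicknesses.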
def relate_point(X, i, ax):
    pt_i = X[i]
    # p_i(X, i) does not depend on j, so compute the link thicknesses once
    thickness = p_i(X, i)
    for j, pt_j in enumerate(X):
        if i != j:
line = ([pt_i[0], pt_j[0]], [pt_i[1], pt_j[1]])
ax.plot(*line, c=cm.Set1(y[j]),
linewidth=5*thickness[j])
# we consider only point 3
i = 3
# Plot bonds linked to sample i in the original space
relate_point(X, i, ax)
ax.set_title("Original points")
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
ax.axis('equal')
# Learn an embedding with NeighborhoodComponentsAnalysis
nca = NeighborhoodComponentsAnalysis(max_iter=30, random_state=random_state)
nca = nca.fit(X, y)
# Plot the points after transformation with NeighborhoodComponentsAnalysis
plt.figure()
ax2 = plt.gca()
# Get the embedding and find the new nearest neighbors
X_embedded = nca.transform(X)
relate_point(X_embedded, i, ax2)
for i in range(len(X)):
ax2.text(X_embedded[i, 0], X_embedded[i, 1], str(i),
va='center', ha='center')
ax2.scatter(X_embedded[i, 0], X_embedded[i, 1], s=300, c=cm.Set1(y[[i]]),
alpha=0.4)
# Make axes equal so that boundaries are displayed correctly as circles
ax2.set_title("NCA embedding")
ax2.axes.get_xaxis().set_visible(False)
ax2.axes.get_yaxis().set_visible(False)
ax2.axis('equal')
plt.show()
| bsd-3-clause |
adrian-soto/QEdark_repo | tools/bandsndos/bandsndos_Ge.py | 4 | 20068 | #
# Adrian Soto
# 22-12-2014
# Stony Brook University
#
################################################
# Plot band structure and DOS from the
# output of the bands.x program in the
# Quantum Espresso package.
#
# Features:
# 1) Allows for scissor correction (band shift)
# 2)
#
################################################
import math
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import rcParams
from matplotlib.ticker import AutoMinorLocator
import matplotlib.gridspec as gridspec
import csv
plt.rcParams['font.family'] = 'Serif'
plt.rcParams['font.serif'] = 'Times New Roman'
#rcParams['text.usetex'] = True
rcParams['font.size'] = 24
class band:
def __init__(self, numkpoints, bandenergies):
self.nks = numkpoints
if (len(bandenergies) != numkpoints):
print "ERROR: list of band energies has wrong length. Setting band to 0."
self.nrg = [0] * numkpoints
else:
self.nrg = bandenergies
def printband(self):
print self.nrg
def shift(self, delta):
self.nrg = map(lambda x : x+delta, self.nrg) # watch for scope here.
return
################################################
# End of class band
################################################
class kpoints:
def __init__(self):
self.klist = []
class dos:
def __init__(self): #, numE, dosE, dosG, dosI):
self.numE = 0
self.dosE = []
self.dosG = []
self.dosI = []
def Load(self, dosfile):
#
# Load DOS from dos.x output
#
print " "
print "Loading DOS from ", dosfile
print " "
# Count lines in file
self.numE=sum(1 for line in open(dosfile))
# Read file line by line and process
f=open(dosfile, 'r')
# First line is header. Discard
data=f.readline()
# Iterate over file lines
for ilin in range(1,self.numE):
data=f.readline()
E=float(data[0:7])
self.dosE.append(E)
G=float(data[9:19])
self.dosG.append(G)
I=float(data[21:31])
self.dosI.append(I)
f.close()
return
################################################
# End of class dos
################################################
#
# Global functions
#
def w0gauss(x):
# As in flib/w0gauss.f90 in the QE package
pi = 3.141592653589793
sqrt2=math.sqrt(2)
arg = min([200.0, (x - 1.0 / sqrt2 ) **2])
w0 = (1.0/math.sqrt(pi)) * math.exp(-1.0 * arg )*(2.0 - sqrt2*x)
return w0
def ReadBandStructure(bandsfile):
#
# This function reads the band structure as written
# to output of the bands.x program. It returns the bs
# as a flat list with all energies and another list with
# the k-point coordinates.
#
f = open(bandsfile, 'r')
# First line contains nbnd and nks. Read.
currentline = f.readline()
nks = int(currentline[22:26])
nbnd = int(currentline[12:16])
# Following lines contain the k-point coordinates
# and the band energies.
# Calculate number of lines containing band structure:
# nks k-point lines
# At each k-point there are (1+nbnd/10) energy values.
nlpkp = 1+nbnd/10 # Number of Lines Per K-Point
nlines = nks + nks * nlpkp
bsaux = []
xk = []
for ik in range (0, nks):
currentline = f.readline()
#kpoint = currentline[12:40]
kpoint = [float(x) for x in currentline.split()]
xk.append(kpoint)
auxenerg = []
for ibnd in range(0, nlpkp):
currentline = f.readline()
# append current line to auxiliary list
auxenerg.append( float(x) for x in currentline.split() )
# flatten list of lists containing energies for a given kpoint
# (each sublist corresponds to one line in the bands.dat file)
energ = [item for sublist in auxenerg for item in sublist]
# Sort ascendingly band energies for current k-point (to
# prevent artificial level crossings if QE bands.x output
# does not sort them correctly) and append to band structure
bsaux.append(sorted(energ))
f.close()
# Flatten bs list
bsflat = [item for sublist in bsaux for item in sublist]
return nks, nbnd, xk, bsflat
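# Added note (illustrative only, not part of the original script): ReadBandStructure
# expects a bands.dat layout roughly like
#     <header line carrying nbnd in character columns 12:16 and nks in 22:26>
#     kx1  ky1  kz1
#     e1  e2  ...  e10        <- up to 10 energies per line,
#     e11 e12 ...                (1 + nbnd/10) lines per k-point
#     kx2  ky2  kz2
#     ...
# Exact spacing matters for the header because fixed character slices are used above.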
def SortByBands(nks, nbnd, bsflat):
    # Rearrange bs from k-points to bands
bs = []
for ibnd in range (0, nbnd):
currentband=[]
for ik in range (0, nks):
#currentband.append(bsflat[ik*nbnd+ibnd])
bs.append(bsflat[ik*nbnd+ibnd])
#bs.append( currentband )
return bs
def FindHLGap(nks, hvb, lcb):
#
# Find HOMO and LUMO energies and energy gap
#
# hvb = highest valence band
# lcb = lowest conduction band
#
# Ehvb = highest valence energy or HOMO energy
# Elcb = lowest conduction energy or LUMO energy
#
gap = lcb[0] - hvb[0]
for ik1 in range (0, nks):
auxcond = lcb[ik1]
for ik2 in range (0, nks):
auxval = hvb[ik2]
currentgap = auxcond-auxval
if (currentgap < 0.0):
print "ERROR: negative gap"
elif (currentgap < gap):
gap = currentgap
Ehvb = max(hvb)
Elcb = min(lcb)
return Ehvb, Elcb, gap
def Scissor(nks, newgap, bands, shifttype):
#
# shifttype == 0 : shift valence bands by -0.5*delta and
# conduction bands by 0.5*delta
# shifttype == 1 : as in 0 but placing the highest valence
# energy at 0.0
# shifttype == 2 : as in 0 but placing the gap center at 0.0
#
EHOMO, ELUMO, oldgap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta=(newgap-oldgap)/2.0
# Apply scissor to band structure
for ibnd in range (0, nbnd):
if (ibnd < nval):
bands[ibnd].shift(-1.0*delta)
else:
bands[ibnd].shift(delta)
if (shifttype==0):
print "Scissor correction to band energies has been applied."
return
elif (shifttype==1):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -1.0*EHOMO
#print "delta=", delta
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Highest valence energy has been set to 0.0 eV"
return
elif (shifttype==2):
EHOMO, ELUMO, gap = FindHLGap(nks, bands[nval-1].nrg , bands[nval].nrg)
delta = -0.5*(EHOMO+ELUMO)
for ibnd in range (0, nbnd):
bands[ibnd].shift(delta)
print "Scissor correction to band energies has been applied."
print "Gap center has been set to 0.0 eV"
return
else:
print "ERROR: shifttype has an non-valid value. Default value shifttype==0."
print "Scissor correction to band energies has been applied."
return
def CreateDOS(nks, nbnd, bzv, Emin, Emax, deltaE, bnd, normalize):
# ATTENTION: bnd must be an object of the class band
Emin = min(bnd[10].nrg)
Emax = max(bnd[nbnd-1].nrg)
ndos = int((Emax - Emin)/deltaE + 0.50000001) # int always rounds to lower integer
dosE = []
dosG = []
intg=0.0
deltaEgauss=5.0*deltaE
d3k=(1.0/nks)*bzv
wk=2.0/nks
print "Creating DOS with uniform k-point weights"
# Create DOS
for idos in range (0, ndos):
E = Emin + idos * deltaE
dosg = 0.0
for ik in range(0, nks):
for ibnd in range (0, nbnd):
dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaEgauss ) * wk
###dosg = dosg + w0gauss ( (E - bnd[ibnd].nrg[ik] ) / deltaE ) * wk
dosg = dosg/deltaEgauss
intg = intg + dosg*deltaE # integrated DOS
dosE.append(E)
dosG.append(dosg)
print "\n Integrated DOS=", intg, "\n"
# Normalize DOS
if (normalize == 1):
print "Normalizing DOS to 1.0 \n"
dosGnorm=dosG
for idos in range (0, ndos):
dosGnorm[idos]=dosGnorm[idos]/intg
return dosE, dosGnorm
if(normalize==0):
return dosE, dosG
else:
print " ERROR!! in CreateDOS function: wrong DOS normalization choice."
return
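# Added note (not in the original script): the loop above evaluates a smeared DOS
#     g(E) = (1/sigma) * sum_k sum_n w_k * K( (E - e_nk) / sigma )
# with K the w0gauss kernel, sigma = 5*deltaE and uniform k-point weights w_k = 2/nks,
# while intg accumulates the integral of g(E) dE as a sanity check.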
def PlotBandStructure(nbnd, nval, bnd, plotfile, Ef, sympoints, nks_btw_sympoints ):
#
# ATTENTION: bnd must be an object of the class band
#
# nval: number of valence bands
# Ef: Fermi Energy. If false then it won't print horizontal line
# sympoints: list containing labels of symmetry points
# nks_btw_sympoints: number of k-points between symmetry points
#
# NOTE: this function assumes that the number of points
# between symmetry points is constant
#
print "Plotting band structure to", plotfile
col = 'k'
for ibnd in range (0, nbnd):
#if (ibnd < nval):
# col='b'
#else:
# col='r'
plt.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
y_min = min(bnd[0].nrg)
    y_max = max(bnd[nbnd-1].nrg)
plt.xlabel("Brillouin zone path")
plt.ylabel("band energies (eV)")
numsympoints = len(sympoints)
kpath=[]
xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
for i in range(0, numsympoints):
kpath.append(sympoints[i])
if (i < numsympoints-1):
for j in range (0, nks_btw_sympoints-1):
kpath.append('')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
plt.xticks(xticks, sympoints)
for i in range(0,numsympoints):
plt.axvline(x=xticks[i], ymin=y_min, ymax=y_max, hold=None, color='k', linewidth=0.25)
    if Ef is not False:
        plt.axhline(Ef, color="black", linestyle="--")
plt.xlim( 0, len(bnd[0].nrg)-1 )
plt.savefig(plotfile)
return
def PlotDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of reals
plt.plot(dosG, dosE)
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.gca().set_xlim(left=0)
plt.savefig(plotname)
return
def PlotBnD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, dosE, dosG, plotname):
col = 'k'
# Two subplots, unpack the axes array immediately
f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)
for ibnd in range (0, nbnd):
ax1.plot(bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
ax1.set_title('Sharing Y axis')
ax2.plot(dosG, dosE)
ax2.set_xlim([0.0, 0.1])
plt.ylim([-15.0, 20.0])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
plt.show()
return
def PlotBnDD(nbnd, nval, bnd, Ef, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, plotname):
######################################
# Plot generation and formatting
######################################
# Two subplots, unpack the axes array immediately
gs = gridspec.GridSpec(1, 2,width_ratios=[1,4])
f = plt.figure()
ax1 = plt.subplot(gs[1])
ax2 = plt.subplot(gs[0])
# Formatting
col = 'k'
ax1.set_xlabel("Brillouin zone path")
ax1.xaxis.set_label_position("bottom")
ax1.set_ylabel("E [eV]", rotation=270)
ax1.yaxis.set_label_position("right")
ax1.text(3.50-0.12, -12.50, 'Ge', fontsize=28)
###ax2.text(0.07, 18.00, 'Si', fontsize=18)
ax2.set_xlabel("DOS \n [eV$^{-1}$]")
ax2.xaxis.set_label_position("top")
#ax2.set_ylabel("E [eV]", rotation=270)
#y_min = -32.0
y_min = -13.0
y_max = 20.0
x2_min = 0.00
x2_max = 5.00
# Mirror
x2_min = 0.12
x2_max = 0.00
ax1.set_ylim([y_min, y_max])
ax2.set_xlim([x2_min, x2_max])
#ax2.set_xlim([0.0, 10.0])
ax2.set_ylim([y_min, y_max])
# Ticks
#minor_locator = AutoMinorLocator(2)
#ax2.xaxis.set_minor_locator(minor_locator)
# Number of symmetry points
numsympoints = len(sympoints)
# Generate horizontal axis containing k-path accumulated length (for BS plot)
x=0.0
klen=[x]
dx=1.0/((numsympoints-1)*nks_btw_sympoints)
for isym in range(0, numsympoints-1):
dx=sym_pt_dists[isym]/nks_btw_sympoints
for ipt in range(1, nks_btw_sympoints+1):
x=x+dx
klen.append(x)
#xticks = range(0, numsympoints*nks_btw_sympoints + 1, nks_btw_sympoints)
xticks=[]
for isym in range(0, numsympoints):
j = isym * nks_btw_sympoints
xticks.append(klen[j])
x1_min=min(xticks)
x1_max=max(xticks)
ax1.set_xlim(x1_min, x1_max)
# Plot bands
col = '0.4'
for ibnd in range (0, nbnd):
ax1.plot(klen , bnd[ibnd].nrg, markersize=2, linestyle='-', color=col) #marker = 'o')
# plt.axvline(x=xticks, ymin=0, ymax=1, hold=None, **kwargs)
# Ticks and vertical lines across BS plot
ax1.set_xticks(xticks)
ax1.set_xticklabels(sympoints)
# Plot DOSs
ax2.plot(dosG1, dosE1, linestyle='-', linewidth=1.0, color='b')
ax2.plot(dosG2, dosE2, linestyle='-', color='r')
#dosticks=[0.0, 0.05, 0.1, 0.15]
dosticks=[5, 0] # Mirror
ax2.set_xticks(dosticks)
ax2.set_xticklabels(dosticks)
#minor_locator = AutoMinorLocator(5)
#ax2.xaxis.set_minor_locator(minor_locator)
minorx2ticks=[4, 3, 2, 1]
ax2.set_xticks(minorx2ticks, minor = True)
# BS ticks
yticks=[-10, -5, 0, 5, 10, 15, 20]
minor_locator = AutoMinorLocator(5)
ax1.yaxis.set_minor_locator(minor_locator)
ax2.yaxis.set_minor_locator(minor_locator)
ax1.xaxis.tick_top()
#ax1.set_yticks(yticks)
#ax1.set_yticklabels(yticks)
# Mirror
ax1.yaxis.tick_right()
ax1.set_yticks(yticks)
ax1.set_yticklabels(yticks)
ax2.set_yticklabels([])
#plt.subplots_adjust(left=0.0, right=0.8)
plt.subplots_adjust(wspace = 0.0)
# Attempt to fill the area to the left of the DOS
# split values into positive and negative
alpha_fill=0.5
dosE1neg=[]
dosG1neg=[]
dosE1pos=[]
dosG1pos=[]
for i in range(0, len(dosE1)):
if(dosE1[i]<0.0):
dosE1neg.append(dosE1[i])
dosG1neg.append(dosG1[i])
else:
dosE1pos.append(dosE1[i])
dosG1pos.append(dosG1[i])
dosE1new =[y_min]+dosE1+[y_max]
dosG1new =[0.0]+dosG1+[0.0]
ax2.fill_between(dosG1new, 0, dosE1new, alpha=alpha_fill, linewidth=0.0, edgecolor='w')
# Vertical lines across BS plot
for i in range(0,numsympoints):
ax1.axvline(x=xticks[i], ymin=y_min, ymax=y_max, color='k', linewidth=0.25)
# Horizontal line at top of valence band
    if Ef is not False:
        ax1.axhline(Ef, color="black", linestyle="--")
        ax2.axhline(Ef, color="black", linestyle="--")
#plt.show()
plt.savefig(plotname, bbox_inches='tight')
return
def PlotMultipleDOS(dosE, dosG, plotname):
# ATTENTION: dosG and dosE must be lists of lists of reals
Ndos=len(dosE[:])
for i in range(0, Ndos):
plt.plot(dosG[i], dosE[i])
plt.xlabel("Density Of States")
plt.ylabel("band energies (eV)")
plt.savefig(plotname)
return
#def WriteBandStructure():
# print (" %10.6f%10.6f%10.6f" % (kpoint[0], kpoint[1], kpoint[2]) )
############################################################################################
############################################################################################
############################################################################################
############################################################################################
############################ PROGRAM STARTS HERE ###################################
############################################################################################
############################################################################################
############################################################################################
############################################################################################
bohr2ang=0.52918
############
# Band structure
############
filename="ge.bands.dat"
nks = 0
nbnd=0
xk=[]
bsflt=[]
bs=[]
sympoints=['$L$','$\Gamma$', '$X$', '$W$', '$K$', '$\Gamma$']
sym_pt_dists=[0.5*math.sqrt(3), 1.0, 0.5, 0.25*math.sqrt(2), 0.75*math.sqrt(2)] ## distances between symmetry points (by hand)
nks_btw_sympoints=50
# Read from file and sort bs by bands
nks, nbnd, xk, bsflt = ReadBandStructure(filename)
if(nbnd==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs = SortByBands(nks, nbnd, bsflt)
print "nks=", nks
print "nbnd=", nbnd
# Create band objects
bands=[]
for ibnd in range (0, nbnd):
ledge = ibnd*nks
redge = ledge+nks
currentband = bs[ledge:redge]
bands.append( band(nks, currentband) )
# Scissor correction
# Si
###alat = 10.330495 # Bohr
###nval = 4 # for Si
###exptgap = 1.11 # eV # Si
# Ge
alat = 10.8171069 # Bohr
nval = 14 # for Ge with semicore
exptgap = 0.67 # Ge
# Convert to ANG and calculate BZV
alat=alat*bohr2ang
V=(alat**3)/4.0 # Good for FCC
bzv = (2.0*math.pi)**3/V
ncond = nbnd - nval
Scissor(nks, exptgap, bands, 1) # shifttype (4th argument) is 1, so the highest valence energy is set to 0.0; pass Ef=0.0 to the plotting functions
print "Scissor correction with gap set to", exptgap
#############
# DOS
#############
filename='ge.bands_full.dat'
nks1, nbnd1, xk1, bsflt1 = ReadBandStructure(filename)
if(nbnd1==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs1 = SortByBands(nks1, nbnd1, bsflt1)
print "nks=", nks1
print "nbnd=", nbnd1
# Create band objects
bands1=[]
for ibnd in range (0, nbnd1):
ledge1 = ibnd*nks1
redge1 = ledge1+nks1
currentband1 = bs1[ledge1:redge1]
bands1.append( band(nks1, currentband1) )
# Scissor correction
Scissor(nks1, exptgap, bands1, 1) # shifttype (4th argument) is 1, so the highest valence energy is set to 0.0; pass Ef=0.0 to the plotting functions
print "Scissor correction with gap set to", exptgap
filename='ge.bands_243.dat'
nks2, nbnd2, xk2, bsflt2 = ReadBandStructure(filename)
if(nbnd2==0):
print "%% ERROR READING BANDS. EXIT %%"
else:
bs2 = SortByBands(nks2, nbnd2, bsflt2)
print "nks=", nks2
print "nbnd=", nbnd2
# Create band objects
bands2=[]
for ibnd in range (0, nbnd2):
ledge2 = ibnd*nks2
redge2 = ledge2+nks2
currentband2 = bs2[ledge2:redge2]
bands2.append( band(nks2, currentband2) )
# Scissor correction
Scissor(nks2, exptgap, bands2, 1) # shifttype (4th argument) is 1, so the highest valence energy is set to 0.0; pass Ef=0.0 to the plotting functions
print "Scissor correction with gap set to", exptgap
# Generate DOSs
deltaE = 0.03 #eV
dosE1, dosG1 = CreateDOS(nks1, nbnd1, bzv, -13.0, 25.0, deltaE, bands1, 0)
dosE2, dosG2 = CreateDOS(nks2, nbnd2, bzv, -13.0, 25.0, deltaE, bands2, 0)
# Plot
#PlotDOS(dosE, dosG, "DOS.pdf")
#PlotBandStructure(nbnd, nval, bands, "BS.pdf", 0.0, sympoints, nks_btw_sympoints)
PlotBnDD(nbnd, nval, bands, 0.0, sympoints, nks_btw_sympoints, sym_pt_dists, dosE1, dosG1, dosE2, dosG2, "BSnDOS.pdf")
# DOS
#mydos=dos()
#mydos.Load('dos_full.dat')
#mydos.Printout()
| gpl-2.0 |
and2egg/philharmonic | philharmonic/tests/test_explorer.py | 1 | 1636 | from mock import patch
from nose.tools import *
import pandas as pd
import philharmonic
@patch('philharmonic.simulator.simulator.run')
def test_explore_ga_weights(mock_run):
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.parameter_space = 'GAWeights'
from philharmonic.explorer import explore
mock_run.return_value = {'Total cost ($)': 0.5}
with patch.object(philharmonic.explorer, '_serialise_results',
return_value=None) as mock_serialise:
explore()
@patch('philharmonic.simulator.simulator.run')
def test_explore_time_offsets(mock_run):
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.parameter_space = 'TimeOffsets'
from philharmonic.explorer import explore
mock_run.return_value = {'Total cost ($)': 0.5}
with patch.object(philharmonic.explorer, '_serialise_results',
return_value=None) as mock_serialise:
explore()
def test_time_offsets():
philharmonic._setup('philharmonic.settings.ga_explore')
from philharmonic import conf
conf.start = pd.Timestamp('2010-06-03 00:00')
conf.times = pd.date_range(conf.start, periods=3, freq='H')
conf.end = conf.times[-1]
conf.time_offsets_step = pd.offsets.DateOffset(months=2)
conf.time_offsets_start = pd.offsets.Hour(0) # the offset of the first run
conf.time_offsets_max = pd.offsets.DateOffset(months=11, days=20)
from philharmonic.explorer import TimeOffsets
combinations = TimeOffsets().combinations
assert_equals(combinations.shape, (6, 2))
| gpl-3.0 |
GGoussar/scikit-image | doc/examples/color_exposure/plot_adapt_rgb.py | 9 | 4535 | """
=========================================
Adapting gray-scale filters to RGB images
=========================================
There are many filters that are designed to work with gray-scale images but not
with color images. To simplify the process of creating functions that can adapt
to RGB images, scikit-image provides the ``adapt_rgb`` decorator.
To actually use the ``adapt_rgb`` decorator, you have to decide how you want to
adapt the RGB image for use with the gray-scale filter. There are two
pre-defined handlers:
``each_channel``
Pass each of the RGB channels to the filter one-by-one, and stitch the
results back into an RGB image.
``hsv_value``
Convert the RGB image to HSV and pass the value channel to the filter.
The filtered result is inserted back into the HSV image and converted
back to RGB.
Below, we demonstrate the use of ``adapt_rgb`` on a couple of gray-scale
filters:
"""
from skimage.color.adapt_rgb import adapt_rgb, each_channel, hsv_value
from skimage import filters
@adapt_rgb(each_channel)
def sobel_each(image):
return filters.sobel(image)
@adapt_rgb(hsv_value)
def sobel_hsv(image):
return filters.sobel(image)
######################################################################
# We can use these functions as we would normally use them, but now they work
# with both gray-scale and color images. Let's plot the results with a color
# image:
from skimage import data
from skimage.exposure import rescale_intensity
import matplotlib.pyplot as plt
image = data.astronaut()
fig = plt.figure(figsize=(14, 7))
ax_each = fig.add_subplot(121, adjustable='box-forced')
ax_hsv = fig.add_subplot(122, sharex=ax_each, sharey=ax_each,
adjustable='box-forced')
# We use 1 - sobel_each(image)
# but this will not work if image is not normalized
ax_each.imshow(rescale_intensity(1 - sobel_each(image)))
ax_each.set_xticks([]), ax_each.set_yticks([])
ax_each.set_title("Sobel filter computed\n on individual RGB channels")
# We use 1 - sobel_hsv(image) but this will not work if image is not normalized
ax_hsv.imshow(rescale_intensity(1 - sobel_hsv(image)))
ax_hsv.set_xticks([]), ax_hsv.set_yticks([])
ax_hsv.set_title("Sobel filter computed\n on (V)alue converted image (HSV)")
######################################################################
# Notice that the result for the value-filtered image preserves the color of
# the original image, but channel filtered image combines in a more
# surprising way. In other common cases, smoothing for example, the channel
# filtered image will produce a better result than the value-filtered image.
#
# You can also create your own handler functions for ``adapt_rgb``. To do so,
# just create a function with the following signature::
#
# def handler(image_filter, image, *args, **kwargs):
# # Manipulate RGB image here...
# image = image_filter(image, *args, **kwargs)
# # Manipulate filtered image here...
# return image
#
# Note that ``adapt_rgb`` handlers are written for filters where the image is
# the first argument.
#
# As a very simple example, we can just convert any RGB image to grayscale
# and then return the filtered result:
from skimage.color import rgb2gray
def as_gray(image_filter, image, *args, **kwargs):
gray_image = rgb2gray(image)
return image_filter(gray_image, *args, **kwargs)
######################################################################
# It's important to create a signature that uses ``*args`` and ``**kwargs``
# to pass arguments along to the filter so that the decorated function is
# allowed to have any number of positional and keyword arguments.
#
# Finally, we can use this handler with ``adapt_rgb`` just as before:
@adapt_rgb(as_gray)
def sobel_gray(image):
return filters.sobel(image)
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, sharex=ax_each, sharey=ax_each,
adjustable='box-forced')
# We use 1 - sobel_gray(image)
# but this will not work if image is not normalized
ax.imshow(rescale_intensity(1 - sobel_gray(image)), cmap=plt.cm.gray)
ax.set_xticks([]), ax.set_yticks([])
ax.set_title("Sobel filter computed\n on the converted grayscale image")
plt.show()
######################################################################
#
# .. note::
#
# A very simple check of the array shape is used for detecting RGB
# images, so ``adapt_rgb`` is not recommended for functions that support
# 3D volumes or color images in non-RGB spaces.
| bsd-3-clause |
AIML/scikit-learn | sklearn/utils/tests/test_validation.py | 133 | 18339 | """Tests for input validation functions"""
import warnings
from tempfile import NamedTemporaryFile
from itertools import product
import numpy as np
from numpy.testing import assert_array_equal
import scipy.sparse as sp
from nose.tools import assert_raises, assert_true, assert_false, assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_no_warnings
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import assert_warns
from sklearn.utils import as_float_array, check_array, check_symmetric
from sklearn.utils import check_X_y
from sklearn.utils.mocking import MockDataFrame
from sklearn.utils.estimator_checks import NotAnArray
from sklearn.random_projection import sparse_random_matrix
from sklearn.linear_model import ARDRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.svm import SVR
from sklearn.datasets import make_blobs
from sklearn.utils.validation import (
NotFittedError,
has_fit_parameter,
check_is_fitted,
check_consistent_length,
DataConversionWarning,
)
from sklearn.utils.testing import assert_raise_message
def test_as_float_array():
# Test function for as_float_array
X = np.ones((3, 10), dtype=np.int32)
X = X + np.arange(10, dtype=np.int32)
# Checks that the return type is ok
X2 = as_float_array(X, copy=False)
np.testing.assert_equal(X2.dtype, np.float32)
# Another test
X = X.astype(np.int64)
X2 = as_float_array(X, copy=True)
# Checking that the array wasn't overwritten
assert_true(as_float_array(X, False) is not X)
# Checking that the new type is ok
np.testing.assert_equal(X2.dtype, np.float64)
# Here, X is of the right type, it shouldn't be modified
X = np.ones((3, 2), dtype=np.float32)
assert_true(as_float_array(X, copy=False) is X)
# Test that if X is fortran ordered it stays
X = np.asfortranarray(X)
assert_true(np.isfortran(as_float_array(X, copy=True)))
# Test the copy parameter with some matrices
matrices = [
np.matrix(np.arange(5)),
sp.csc_matrix(np.arange(5)).toarray(),
sparse_random_matrix(10, 10, density=0.10).toarray()
]
for M in matrices:
N = as_float_array(M, copy=True)
N[0, 0] = np.nan
assert_false(np.isnan(M).any())
def test_np_matrix():
# Confirm that input validation code does not return np.matrix
X = np.arange(12).reshape(3, 4)
assert_false(isinstance(as_float_array(X), np.matrix))
assert_false(isinstance(as_float_array(np.matrix(X)), np.matrix))
assert_false(isinstance(as_float_array(sp.csc_matrix(X)), np.matrix))
def test_memmap():
# Confirm that input validation code doesn't copy memory mapped arrays
asflt = lambda x: as_float_array(x, copy=False)
with NamedTemporaryFile(prefix='sklearn-test') as tmp:
M = np.memmap(tmp, shape=100, dtype=np.float32)
M[:] = 0
for f in (check_array, np.asarray, asflt):
X = f(M)
X[:] = 1
assert_array_equal(X.ravel(), M)
X[:] = 0
def test_ordering():
# Check that ordering is enforced correctly by validation utilities.
# We need to check each validation utility, because a 'copy' without
# 'order=K' will kill the ordering.
X = np.ones((10, 5))
for A in X, X.T:
for copy in (True, False):
B = check_array(A, order='C', copy=copy)
assert_true(B.flags['C_CONTIGUOUS'])
B = check_array(A, order='F', copy=copy)
assert_true(B.flags['F_CONTIGUOUS'])
if copy:
assert_false(A is B)
X = sp.csr_matrix(X)
X.data = X.data[::-1]
assert_false(X.data.flags['C_CONTIGUOUS'])
def test_check_array():
# accept_sparse == None
# raise error on sparse inputs
X = [[1, 2], [3, 4]]
X_csr = sp.csr_matrix(X)
assert_raises(TypeError, check_array, X_csr)
# ensure_2d
X_array = check_array([0, 1, 2])
assert_equal(X_array.ndim, 2)
X_array = check_array([0, 1, 2], ensure_2d=False)
assert_equal(X_array.ndim, 1)
# don't allow ndim > 3
X_ndim = np.arange(8).reshape(2, 2, 2)
assert_raises(ValueError, check_array, X_ndim)
check_array(X_ndim, allow_nd=True) # doesn't raise
# force_all_finite
X_inf = np.arange(4).reshape(2, 2).astype(np.float)
X_inf[0, 0] = np.inf
assert_raises(ValueError, check_array, X_inf)
check_array(X_inf, force_all_finite=False) # no raise
# nan check
X_nan = np.arange(4).reshape(2, 2).astype(np.float)
X_nan[0, 0] = np.nan
assert_raises(ValueError, check_array, X_nan)
check_array(X_inf, force_all_finite=False) # no raise
# dtype and order enforcement.
X_C = np.arange(4).reshape(2, 2).copy("C")
X_F = X_C.copy("F")
X_int = X_C.astype(np.int)
X_float = X_C.astype(np.float)
Xs = [X_C, X_F, X_int, X_float]
dtypes = [np.int32, np.int, np.float, np.float32, None, np.bool, object]
orders = ['C', 'F', None]
copys = [True, False]
for X, dtype, order, copy in product(Xs, dtypes, orders, copys):
X_checked = check_array(X, dtype=dtype, order=order, copy=copy)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if order == 'C':
assert_true(X_checked.flags['C_CONTIGUOUS'])
assert_false(X_checked.flags['F_CONTIGUOUS'])
elif order == 'F':
assert_true(X_checked.flags['F_CONTIGUOUS'])
assert_false(X_checked.flags['C_CONTIGUOUS'])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and
X_checked.flags['C_CONTIGUOUS'] == X.flags['C_CONTIGUOUS']
and X_checked.flags['F_CONTIGUOUS'] == X.flags['F_CONTIGUOUS']):
assert_true(X is X_checked)
# allowed sparse != None
X_csc = sp.csc_matrix(X_C)
X_coo = X_csc.tocoo()
X_dok = X_csc.todok()
X_int = X_csc.astype(np.int)
X_float = X_csc.astype(np.float)
Xs = [X_csc, X_coo, X_dok, X_int, X_float]
accept_sparses = [['csr', 'coo'], ['coo', 'dok']]
for X, dtype, accept_sparse, copy in product(Xs, dtypes, accept_sparses,
copys):
with warnings.catch_warnings(record=True) as w:
X_checked = check_array(X, dtype=dtype,
accept_sparse=accept_sparse, copy=copy)
if (dtype is object or sp.isspmatrix_dok(X)) and len(w):
message = str(w[0].message)
messages = ["object dtype is not supported by sparse matrices",
"Can't check dok sparse matrix for nan or inf."]
assert_true(message in messages)
else:
assert_equal(len(w), 0)
if dtype is not None:
assert_equal(X_checked.dtype, dtype)
else:
assert_equal(X_checked.dtype, X.dtype)
if X.format in accept_sparse:
# no change if allowed
assert_equal(X.format, X_checked.format)
else:
# got converted
assert_equal(X_checked.format, accept_sparse[0])
if copy:
assert_false(X is X_checked)
else:
# doesn't copy if it was already good
if (X.dtype == X_checked.dtype and X.format == X_checked.format):
assert_true(X is X_checked)
# other input formats
# convert lists to arrays
X_dense = check_array([[1, 2], [3, 4]])
assert_true(isinstance(X_dense, np.ndarray))
# raise on too deep lists
assert_raises(ValueError, check_array, X_ndim.tolist())
check_array(X_ndim.tolist(), allow_nd=True) # doesn't raise
# convert weird stuff to arrays
X_no_array = NotAnArray(X_dense)
result = check_array(X_no_array)
assert_true(isinstance(result, np.ndarray))
def test_check_array_pandas_dtype_object_conversion():
# test that data-frame like objects with dtype object
# get converted
X = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]], dtype=np.object)
X_df = MockDataFrame(X)
assert_equal(check_array(X_df).dtype.kind, "f")
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
# smoke-test against dataframes with column named "dtype"
X_df.dtype = "Hans"
assert_equal(check_array(X_df, ensure_2d=False).dtype.kind, "f")
def test_check_array_dtype_stability():
# test that lists with ints don't get converted to floats
X = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
assert_equal(check_array(X).dtype.kind, "i")
assert_equal(check_array(X, ensure_2d=False).dtype.kind, "i")
def test_check_array_dtype_warning():
X_int_list = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
X_float64 = np.asarray(X_int_list, dtype=np.float64)
X_float32 = np.asarray(X_int_list, dtype=np.float32)
X_int64 = np.asarray(X_int_list, dtype=np.int64)
X_csr_float64 = sp.csr_matrix(X_float64)
X_csr_float32 = sp.csr_matrix(X_float32)
X_csc_float32 = sp.csc_matrix(X_float32)
X_csc_int32 = sp.csc_matrix(X_int64, dtype=np.int32)
y = [0, 0, 1]
integer_data = [X_int64, X_csc_int32]
float64_data = [X_float64, X_csr_float64]
float32_data = [X_float32, X_csr_float32, X_csc_float32]
for X in integer_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_warns(DataConversionWarning, check_array, X,
dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
# Check that the warning message includes the name of the Estimator
X_checked = assert_warns_message(DataConversionWarning,
'SomeEstimator',
check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True,
warn_on_dtype=True,
estimator='SomeEstimator')
assert_equal(X_checked.dtype, np.float64)
X_checked, y_checked = assert_warns_message(
DataConversionWarning, 'KNeighborsClassifier',
check_X_y, X, y, dtype=np.float64, accept_sparse=True,
warn_on_dtype=True, estimator=KNeighborsClassifier())
assert_equal(X_checked.dtype, np.float64)
for X in float64_data:
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=True)
assert_equal(X_checked.dtype, np.float64)
X_checked = assert_no_warnings(check_array, X, dtype=np.float64,
accept_sparse=True, warn_on_dtype=False)
assert_equal(X_checked.dtype, np.float64)
for X in float32_data:
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=True)
assert_equal(X_checked.dtype, np.float32)
assert_true(X_checked is X)
X_checked = assert_no_warnings(check_array, X,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=True)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X)
X_checked = assert_no_warnings(check_array, X_csc_float32,
dtype=[np.float64, np.float32],
accept_sparse=['csr', 'dok'],
copy=False)
assert_equal(X_checked.dtype, np.float32)
assert_false(X_checked is X_csc_float32)
assert_equal(X_checked.format, 'csr')
def test_check_array_min_samples_and_features_messages():
# empty list is considered 2D by default:
msg = "0 feature(s) (shape=(1, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [])
# If considered a 1D collection when ensure_2d=False, then the minimum
# number of samples will break:
msg = "0 sample(s) (shape=(0,)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_array, [], ensure_2d=False)
# Invalid edge case when checking the default minimum sample of a scalar
msg = "Singleton array array(42) cannot be considered a valid collection."
assert_raise_message(TypeError, msg, check_array, 42, ensure_2d=False)
# But this works if the input data is forced to look like a 2 array with
# one sample and one feature:
X_checked = check_array(42, ensure_2d=True)
assert_array_equal(np.array([[42]]), X_checked)
# Simulate a model that would need at least 2 samples to be well defined
X = np.ones((1, 10))
y = np.ones(1)
msg = "1 sample(s) (shape=(1, 10)) while a minimum of 2 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2)
# The same message is raised if the data has 2 dimensions even if this is
# not mandatory
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_samples=2, ensure_2d=False)
# Simulate a model that would require at least 3 features (e.g. SelectKBest
# with k=3)
X = np.ones((10, 2))
y = np.ones(2)
msg = "2 feature(s) (shape=(10, 2)) while a minimum of 3 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3)
# Only the feature check is enabled whenever the number of dimensions is 2
# even if allow_nd is enabled:
assert_raise_message(ValueError, msg, check_X_y, X, y,
ensure_min_features=3, allow_nd=True)
# Simulate a case where a pipeline stage as trimmed all the features of a
# 2D dataset.
X = np.empty(0).reshape(10, 0)
y = np.ones(10)
msg = "0 feature(s) (shape=(10, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, check_X_y, X, y)
# nd-data is not checked for any minimum number of features by default:
X = np.ones((10, 0, 28, 28))
y = np.ones(10)
X_checked, y_checked = check_X_y(X, y, allow_nd=True)
assert_array_equal(X, X_checked)
assert_array_equal(y, y_checked)
def test_has_fit_parameter():
assert_false(has_fit_parameter(KNeighborsClassifier, "sample_weight"))
assert_true(has_fit_parameter(RandomForestRegressor, "sample_weight"))
assert_true(has_fit_parameter(SVR, "sample_weight"))
assert_true(has_fit_parameter(SVR(), "sample_weight"))
def test_check_symmetric():
arr_sym = np.array([[0, 1], [1, 2]])
arr_bad = np.ones(2)
arr_asym = np.array([[0, 2], [0, 2]])
test_arrays = {'dense': arr_asym,
'dok': sp.dok_matrix(arr_asym),
'csr': sp.csr_matrix(arr_asym),
'csc': sp.csc_matrix(arr_asym),
'coo': sp.coo_matrix(arr_asym),
'lil': sp.lil_matrix(arr_asym),
'bsr': sp.bsr_matrix(arr_asym)}
# check error for bad inputs
assert_raises(ValueError, check_symmetric, arr_bad)
# check that asymmetric arrays are properly symmetrized
for arr_format, arr in test_arrays.items():
# Check for warnings and errors
assert_warns(UserWarning, check_symmetric, arr)
assert_raises(ValueError, check_symmetric, arr, raise_exception=True)
output = check_symmetric(arr, raise_warning=False)
if sp.issparse(output):
assert_equal(output.format, arr_format)
assert_array_equal(output.toarray(), arr_sym)
else:
assert_array_equal(output, arr_sym)
def test_check_is_fitted():
# Check is ValueError raised when non estimator instance passed
assert_raises(ValueError, check_is_fitted, ARDRegression, "coef_")
assert_raises(TypeError, check_is_fitted, "SVR", "support_")
ard = ARDRegression()
svr = SVR()
try:
assert_raises(NotFittedError, check_is_fitted, ard, "coef_")
assert_raises(NotFittedError, check_is_fitted, svr, "support_")
except ValueError:
assert False, "check_is_fitted failed with ValueError"
# NotFittedError is a subclass of both ValueError and AttributeError
try:
check_is_fitted(ard, "coef_", "Random message %(name)s, %(name)s")
except ValueError as e:
assert_equal(str(e), "Random message ARDRegression, ARDRegression")
try:
check_is_fitted(svr, "support_", "Another message %(name)s, %(name)s")
except AttributeError as e:
assert_equal(str(e), "Another message SVR, SVR")
ard.fit(*make_blobs())
svr.fit(*make_blobs())
assert_equal(None, check_is_fitted(ard, "coef_"))
assert_equal(None, check_is_fitted(svr, "support_"))
def test_check_consistent_length():
check_consistent_length([1], [2], [3], [4], [5])
check_consistent_length([[1, 2], [[1, 2]]], [1, 2], ['a', 'b'])
check_consistent_length([1], (2,), np.array([3]), sp.csr_matrix((1, 2)))
assert_raises_regexp(ValueError, 'inconsistent numbers of samples',
check_consistent_length, [1, 2], [1])
assert_raises_regexp(TypeError, 'got <\w+ \'int\'>',
check_consistent_length, [1, 2], 1)
assert_raises_regexp(TypeError, 'got <\w+ \'object\'>',
check_consistent_length, [1, 2], object())
assert_raises(TypeError, check_consistent_length, [1, 2], np.array(1))
# Despite ensembles having __len__ they must raise TypeError
assert_raises_regexp(TypeError, 'estimator', check_consistent_length,
[1, 2], RandomForestRegressor())
# XXX: We should have a test with a string, but what is correct behaviour?
| bsd-3-clause |
datapythonista/pandas | pandas/core/window/indexers.py | 2 | 12001 | """Indexer objects for computing start/end window bounds for rolling operations"""
from datetime import timedelta
from typing import (
Dict,
Optional,
Tuple,
Type,
)
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
"""Base class for window bounds calculations."""
def __init__(
self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs
):
"""
Parameters
----------
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.index_array = index_array
self.window_size = window_size
# Set user defined kwargs as attributes that can be used in get_window_bounds
for key, value in kwargs.items():
setattr(self, key, value)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
raise NotImplementedError
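# Illustrative sketch (added comment, not part of pandas): user code provides custom
# window bounds by subclassing BaseIndexer and overriding get_window_bounds, e.g.
#
#     class ZeroStartIndexer(BaseIndexer):
#         def get_window_bounds(self, num_values=0, min_periods=None,
#                               center=None, closed=None):
#             start = np.zeros(num_values, dtype=np.int64)
#             end = np.arange(1, num_values + 1, dtype=np.int64)
#             return start, end
#
# and then passing an instance as the `window` argument of DataFrame.rolling(), which
# in this example behaves like an expanding window.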
class FixedWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of fixed length."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
offset = (self.window_size - 1) // 2
else:
offset = 0
end = np.arange(1 + offset, num_values + 1 + offset, dtype="int64")
start = end - self.window_size
if closed in ["left", "both"]:
start -= 1
if closed in ["left", "neither"]:
end -= 1
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)
return start, end
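# Worked example (added comment, not part of pandas): with window_size=3, num_values=5,
# center=False (or None) and closed=None the code above returns
#     start = [0, 0, 0, 1, 2]
#     end   = [1, 2, 3, 4, 5]
# i.e. trailing windows [0:1], [0:2], [0:3], [1:4], [2:5].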
class VariableWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of variable length, namely for time series."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# error: Argument 4 to "calculate_variable_window_bounds" has incompatible
# type "Optional[bool]"; expected "bool"
# error: Argument 6 to "calculate_variable_window_bounds" has incompatible
# type "Optional[ndarray]"; expected "ndarray"
return calculate_variable_window_bounds(
num_values,
self.window_size,
min_periods,
center, # type: ignore[arg-type]
closed,
self.index_array, # type: ignore[arg-type]
)
class VariableOffsetWindowIndexer(BaseIndexer):
"""Calculate window boundaries based on a non-fixed offset such as a BusinessDay"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
index=None,
offset=None,
**kwargs,
):
super().__init__(index_array, window_size, **kwargs)
self.index = index
self.offset = offset
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# if windows is variable, default is 'right', otherwise default is 'both'
if closed is None:
closed = "right" if self.index is not None else "both"
right_closed = closed in ["right", "both"]
left_closed = closed in ["left", "both"]
if self.index[num_values - 1] < self.index[0]:
index_growth_sign = -1
else:
index_growth_sign = 1
start = np.empty(num_values, dtype="int64")
start.fill(-1)
end = np.empty(num_values, dtype="int64")
end.fill(-1)
start[0] = 0
# right endpoint is closed
if right_closed:
end[0] = 1
# right endpoint is open
else:
end[0] = 0
# start is start of slice interval (including)
# end is end of slice interval (not including)
for i in range(1, num_values):
end_bound = self.index[i]
start_bound = self.index[i] - index_growth_sign * self.offset
# left endpoint is closed
if left_closed:
start_bound -= Nano(1)
# advance the start bound until we are
# within the constraint
start[i] = i
for j in range(start[i - 1], i):
if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
start[i] = j
break
# end bound is previous end
# or current index
if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
end[i] = i + 1
else:
end[i] = end[i - 1]
# right endpoint is open
if not right_closed:
end[i] -= 1
return start, end
class ExpandingIndexer(BaseIndexer):
"""Calculate expanding window bounds, mimicking df.expanding()"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return (
np.zeros(num_values, dtype=np.int64),
np.arange(1, num_values + 1, dtype=np.int64),
)
class FixedForwardWindowIndexer(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the
current row.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
)
start = np.arange(num_values, dtype="int64")
end_s = start[: -self.window_size] + self.window_size
end_e = np.full(self.window_size, num_values, dtype="int64")
end = np.concatenate([end_s, end_e])
return start, end
class GroupbyIndexer(BaseIndexer):
"""Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
groupby_indicies: Optional[Dict] = None,
window_indexer: Type[BaseIndexer] = BaseIndexer,
indexer_kwargs: Optional[Dict] = None,
**kwargs,
):
"""
Parameters
----------
index_array : np.ndarray or None
np.ndarray of the index of the original object that we are performing
a chained groupby operation over. This index has been pre-sorted relative to
the groups
window_size : int
window size during the windowing operation
groupby_indicies : dict or None
dict of {group label: [positional index of rows belonging to the group]}
window_indexer : BaseIndexer
BaseIndexer class determining the start and end bounds of each group
indexer_kwargs : dict or None
Custom kwargs to be passed to window_indexer
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.groupby_indicies = groupby_indicies or {}
self.window_indexer = window_indexer
self.indexer_kwargs = indexer_kwargs or {}
super().__init__(
index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs
)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# 1) For each group, get the indices that belong to the group
# 2) Use the indices to calculate the start & end bounds of the window
# 3) Append the window bounds in group order
start_arrays = []
end_arrays = []
window_indicies_start = 0
for key, indices in self.groupby_indicies.items():
index_array: np.ndarray | None
if self.index_array is not None:
index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.window_indexer(
index_array=index_array,
window_size=self.window_size,
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
len(indices), min_periods, center, closed
)
start = start.astype(np.int64)
end = end.astype(np.int64)
# Cannot use groupby_indicies as they might not be monotonic with the object
# we're rolling over
window_indicies = np.arange(
window_indicies_start, window_indicies_start + len(indices)
)
window_indicies_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indicies = np.append(
window_indicies, [window_indicies[-1] + 1]
).astype(np.int64)
start_arrays.append(window_indicies.take(ensure_platform_int(start)))
end_arrays.append(window_indicies.take(ensure_platform_int(end)))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
return start, end
class ExponentialMovingWindowIndexer(BaseIndexer):
"""Calculate ewm window bounds (the entire window)"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)
| bsd-3-clause |
ORNL-CEES/Cap | python/example/helpers.py | 3 | 2254 | from pycap import Observer, ECLabAsciiFile
from IPython import display
from numpy import real, imag, absolute, angle
from matplotlib import pyplot
from sys import stdout, exit
from os import remove
class PrintColumns(Observer):
def __new__(cls, *args, **kwargs):
return object.__new__(PrintColumns)
def __init__(self):
self._template = u''
for i in range(3):
self._template += '{left}{0}:{format_spec}{right}{separator}'\
.format(i, format_spec='{format_spec}',
left='{', right='}', separator='\t')
def update(self, subject, *args, **kwargs):
extra = '>20'
print(self._template.format('freq/Hz',
'Re(Z)/ohm',
'-Im(Z)/ohm',
format_spec=extra + "s"),
file=stdout)
n = subject._data['frequency'].size
for i in range(n):
f = subject._data['frequency'][i]
Z = subject._data['impedance'][i]
Y = 1.0 / Z
place_holder = 255
line = self._template.format(float(f),
float(real(Z)),
-float(imag(Z)),
format_spec=extra + '.7e')
print(line, file=stdout)
class RefreshDisplay(Observer):
def __new__(cls, *args, **kwargs):
return object.__new__(RefreshDisplay)
def update(self, subject, *args, **kwargs):
display.clear_output(wait=True)
display.display(pyplot.gcf())
def check_input(device, experiment):
experiment._extra_data = device.inspect()
dummy = ECLabAsciiFile('dummy')
dummy.update(experiment)
with open('dummy', 'r', encoding='latin-1') as fin:
lines = fin.readlines()
for line in lines[7:-1]:
print(line.rstrip('\n'))
remove('dummy')
print('continue? [Y/n]')
yes = set(['yes', 'y', ''])
no = set(['no', 'n'])
while True:
answer = input().lower()
if answer in yes:
break
elif answer in no:
exit(0)
else:
print("Please respond with 'yes' or 'no'")
| bsd-3-clause |
vrooje/pulsar-hunters-analysis | aggregate_pulsarclass.py | 1 | 33505 | #Python 2.7.9 (default, Apr 5 2015, 22:21:35)
import sys, os
# default output filename, defined up here so the usage message below can reference it
outfile_default = 'pulsar_aggregations.csv'
# file with raw classifications (csv)
# put this way up here so if there are no inputs we exit quickly before even trying to load everything else
try:
classfile_in = sys.argv[1]
except:
#classfile_in = 'data/2e3d12a2-56ca-4d1f-930a-9ecc7fd39885.csv'
print("\nUsage: %s classifications_infile [weight_class aggregations_outfile]" % sys.argv[0])
print(" classifications_infile is a Zooniverse (Panoptes) classifications data export CSV.")
print(" weight_class is 1 if you want to calculate and apply user weightings, 0 otherwise.")
print(" aggregations_outfile is the name of the file you want written. If you don't specify,")
print(" the filename is %s by default." % outfile_default)
sys.exit(0)
import numpy as np # using 1.10.1
import pandas as pd # using 0.13.1
#import datetime
#import dateutil.parser
import json
############ Define files and settings below ##############
# default outfile (outfile_default is defined near the top, before the argument check)
rankfile_stem = 'subjects_ranked_by_weighted_class_asof_'
# file with tags left in Talk, for value-added columns below
talk_export_file = "project-764-tags_2016-01-15.json"
# file with master list between Zooniverse metadata image filename (no source coords) and
# original filename with source coords and additional info
# also I get to have a variable that uses "filename" twice where each means a different thing
# a filename for a file full of filenames #alliterationbiyotch
filename_master_list_filename = "HTRU-N_sets_keys.csv"
# this is a list of possible matches to known pulsars that was done after the fact so they
# are flagged as "cand" in the database instead of "known" etc.
poss_match_file = 'PossibleMatches.csv'
# later we will select on tags by the project team and possibly weight them differently
# note I've included the moderators and myself (though I didn't tag anything).
# Also note it's possible to do this in a more general fashion using a file with project users and roles
# However, hard-coding seemed the thing to do given our time constraints (and the fact that I don't think
# you can currently export the user role file from the project builder)
project_team = 'bretonr jocelynbb spindizzy Simon_Rookyard Polzin cristina_ilie jamesy23 ADCameron Prabu walkcr roblyon chiatan llevin benjamin_shaw bhaswati djchampion jwbmartin bstappers ElisabethB Capella05 vrooje'.split()
# define the active workflow - we will ignore all classifications not on this workflow
# we could make this an input but let's not get too fancy for a specific case.
# for beta test
#active_workflow_id = 1099
#active_workflow_major = 6
# for live project
active_workflow_id = 1224
active_workflow_major = 4
# do we want sum(weighted vote count) = sum(raw vote count)?
normalise_weights = True
# do we want to write an extra file with just classification counts and usernames
# (and a random color column, for treemaps)?
counts_out = True
counts_out_file = 'class_counts_colors.csv'
############ Set the other inputs now ###############
try:
apply_weight = int(sys.argv[2])
except:
apply_weight = 0
try:
outfile = sys.argv[3]
except:
outfile = outfile_default
#################################################################################
#################################################################################
#################################################################################
# This is the function that actually does the aggregating
def aggregate_class(grp):
# translate the group to a dataframe because FML if I don't (some indexing etc is different)
thegrp = pd.DataFrame(grp)
# figure out what we're looping over below
answers = thegrp.pulsar_classification.unique()
# aggregating is a matter of grouping by different answers and summing the counts/weights
byans = thegrp.groupby('pulsar_classification')
ans_ct_tot = byans['count'].aggregate('sum')
ans_wt_tot = byans['weight'].aggregate('sum')
# we want fractions eventually, so we need denominators
count_tot = np.sum(ans_ct_tot) # we could also do len(thegrp)
weight_tot = np.sum(ans_wt_tot)
# okay, now we should have a series of counts for each answer, one for weighted counts, and
# the total votes and weighted votes for this subject.
# now loop through the possible answers and create the raw and weighted vote fractions
# and save the counts as well.
# this is a list for now and we'll make it into a series and order the columns later
class_agg = {}
class_agg['count_unweighted'] = count_tot
class_agg['count_weighted'] = weight_tot
class_agg['subject_type'] = thegrp.subject_type.unique()[0]
class_agg['filename'] = thegrp.filename.unique()[0]
for a in answers:
# don't be that jerk who labels things with "p0" or otherwise useless internal indices.
# Use the text of the response next to this answer choice in the project builder (but strip spaces)
raw_frac_label = ('p_'+a).replace(' ', '_')
wt_frac_label = ('p_'+a+'_weight').replace(' ', '_')
class_agg[raw_frac_label] = ans_ct_tot[a]/float(count_tot)
class_agg[wt_frac_label] = ans_wt_tot[a]/float(weight_tot)
# oops, this is hard-coded so that there's Yes and No as answers - sorry to those trying to generalise
col_order = ["filename", "p_Yes", "p_No", "p_Yes_weight", "p_No_weight",
"count_unweighted", "count_weighted", "subject_type"]
return pd.Series(class_agg)[col_order]
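# Worked example with made-up numbers: a subject with 7 "Yes" and 3 "No" classifications, all at
# weight 1.0, would come back from aggregate_class as p_Yes = 0.7, p_No = 0.3, p_Yes_weight = 0.7,
# p_No_weight = 0.3, count_unweighted = 10 and count_weighted = 10.0.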
#################################################################################
#################################################################################
#################################################################################
# The newer weighting assignment function (assign_weight, defined further below) lets the user choose
# between different weighting schemes; the scheme in this older function is the one that is not
# preferred, for reasons explained inside assign_weight.
def assign_weight_old(seed):
# keep the two seed cases separate because we might want to use a different base for each
if seed < 0.:
return max([0.05, pow(1.0025, seed)])
elif seed > 0:
return min([3.0, pow(1.0025, seed)])
else:
return 1.0
# assigns a weight based on a seed parameter
# The weight is assigned using the seed as an exponent and the number below as the base.
# The number is just slightly offset from 1 so that it takes many classifications for
# a user's potential weight to cap out at the max weight (3) or bottom out at the min (0.05).
# Currently there are 641 "known" pulsars in the DB so the base of 1.0025 is largely based on that.
# Update: there are now about 5,000 simulated pulsars in the subject set as well, and they have a
# much higher retirement limit, so that more people will have classified them and we have more info.
# Note I'd rather this did a proper analysis with a confusion matrix etc but under a time crunch
# we went with something simpler.
def assign_weight(q, which_weight):
# the floor weight for the case of which_weight == 2
# i.e. someone who has seed = 0 will have this
# seed = 0 could either be equal numbers right & wrong, OR that we don't have any information
c0 = 0.5
seed = q[1].seed
n_gs = q[1].n_gs
# Two possible weighting schemes:
# which_weight == 1: w = 1.0025^(seed), bounded between 0.05 and 3.0
# which_weight == 2: w = (1 + log n_gs)^(seed/n_gs), bounded between 0.05 and 3.0
#
# Weighting Scheme 1:
# this is an okay weighting scheme, but it doesn't account for the fact that someone might be prolific
# but not a very good classifier, and those classifiers shouldn't have a high weight.
# Example: Bob does 10000 gold-standard classifications and gets 5100 right, 4900 wrong.
# In this weighting scheme, Bob's weighting seed is +100, which means a weight of 1.0025^100 = 1.3,
# despite the fact that Bob's classifications are consistent with random within 1%.
# The weighting below this one would take the weight based on 100/10000, which is much better.
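    # Putting rough numbers on that example (seed = +100, n_gs = 10000):
    #   scheme 1: 1.0025^100 ~ 1.28, so Bob looks like a better-than-average classifier
    #   scheme 2: 0.5 * (1 + log10(10000))^(100/10000) = 0.5 * 5^0.01 ~ 0.51, i.e. he stays near the floor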
if which_weight == 1:
# keep the two seed cases separate because we might want to use a different base for each
if seed < 0.:
return max([0.05, pow(1.0025, seed)])
elif seed > 0:
return min([3.0, pow(1.0025, seed)])
else:
return 1.0
elif which_weight == 2:
if n_gs < 1: # don't divide by or take the log of 0
# also if they didn't do any gold-standard classifications assume they have the default weight
return c0
else:
# note the max of 3 is unlikely to be reached, but someone could hit the floor.
return min([3.0, max([0.05, c0*pow((1.0 + np.log10(n_gs)), (float(seed)/float(n_gs)))])])
else:
# unweighted - so maybe don't even enter this function if which_weight is not 1 or 2...
return 1.0
#################################################################################
#################################################################################
#################################################################################
# Get the Gini coefficient - https://en.wikipedia.org/wiki/Gini_coefficient
# Typical values of the Gini for healthy Zooniverse projects (Cox et al. 2015) are
# in the range of 0.7-0.9.
def gini(list_of_values):
sorted_list = sorted(list_of_values)
height, area = 0, 0
for value in sorted_list:
height += value
area += height - value / 2.
fair_area = height * len(list_of_values) / 2
return (fair_area - area) / fair_area
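# Quick sanity checks (not executed here): gini([1]*10) returns 0.0 (everyone contributes equally)
# and gini([0]*9 + [1]) returns 0.9 (one person does essentially all the work).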
#################################################################################
#################################################################################
#################################################################################
# assign a color randomly if logged in, gray otherwise
def randcolor(user_label):
if user_label.startswith('not-logged-in-'):
# keep it confined to grays, i.e. R=G=B and not too bright, not too dark
g = random.randint(25,150)
return '#%02X%02X%02X' % (g,g,g)
#return '#555555'
else:
# the lambda makes this generate a new int every time it's called, so that
# in general R != G != B below.
r = lambda: random.randint(0,255)
return '#%02X%02X%02X' % (r(),r(),r())
#################################################################################
#################################################################################
#################################################################################
# These are functions that extract information from the various JSONs that are
# included in the classification exports. To Do: optimise these so that one .apply()
# call will extract them for everything without so many &^%@$ing loops.
def get_subject_type(q):
try:
return q[1].subject_json[q[1].subject_id]['#Class']
except:
return "cand"
def get_filename(q):
try:
return q[1].subject_json[q[1].subject_id]['CandidateFile']
except:
try:
return q[1].subject_json[q[1].subject_id]['CandidateFileVertical']
except:
try:
return q[1].subject_json[q[1].subject_id]['CandidateFileHorizontal']
except:
return "filenotfound.png"
# get number of gold-standard classifications completed by a user (used if weighting)
def get_n_gs(thegrp):
return sum(pd.DataFrame(thegrp).seed != 0)
# Something went weird with IP addresses, so use more info to determine unique users
# Note the user_name still has the IP address in it if the user is not logged in;
# it's just that for this specific project it's not that informative.
def get_alternate_sessioninfo(row):
# if they're logged in, save yourself all this trouble
if not row[1]['user_name'].startswith('not-logged-in'):
return row[1]['user_name']
else:
metadata = row[1]['meta_json']
# IP + session, if it exists
# (IP, agent, viewport_width, viewport_height) if session doesn't exist
try:
# start with "not-logged-in" so stuff later doesn't break
return str(row[1]['user_name']) +"_"+ str(metadata['session'])
except:
try:
viewport = str(metadata['viewport'])
except:
viewport = "NoViewport"
try:
user_agent = str(metadata['user_agent'])
except:
user_agent = "NoUserAgent"
try:
user_ip = str(row[1]['user_name'])
except:
user_ip = "NoUserIP"
thesession = user_ip + user_agent + viewport
return thesession
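# Illustrative outputs (values invented): a logged-in user comes back as just their login, e.g.
# "zoofan42"; an anonymous classification with session metadata comes back as something like
# "not-logged-in-<iphash>_<sessionhash>"; and the no-session fallback is the IP hash, user agent
# and viewport strings concatenated.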
#################################################################################
#################################################################################
#################################################################################
# Print out the input parameters just as a sanity check
print("Computing aggregations using:")
print(" infile: %s" % classfile_in)
print(" weighted? %d" % apply_weight)
print(" Will print to %s after processing." % outfile)
#################################################################################
#################################################################################
#################################################################################
#
#
#
#
# Begin the main work
#
#
#
#
print("Reading classifications from %s ..." % classfile_in)
classifications = pd.read_csv(classfile_in) # this step can take a few minutes for a big file
# Talk tags are not usually huge files so this doesn't usually take that long
print("Parsing Talk tag file for team tags %s ..." % talk_export_file)
talkjson = json.loads(open(talk_export_file).read())
talktags_all = pd.DataFrame(talkjson)
# we only care about the Subject comments here, not discussions on the boards
# also we only care about tags by the research team & moderators
talktags = talktags_all[(talktags_all.taggable_type == "Subject") & (talktags_all.user_login.isin(project_team))].copy()
# make a username-tag pair column
# subject id is a string in the classifications array so force it to be one here or the match won't work
talktags['subject_id'] = [str(int(q)) for q in talktags.taggable_id]
talktags["user_tag"] = talktags.user_login+": #"+talktags.name+";"
# when we're talking about Subject tags, taggable_id is subject_id
talk_bysubj = talktags.groupby('subject_id')
# this now contains all the project-team-written tags on each subject, 1 row per subject
subj_tags = pd.DataFrame(talk_bysubj.user_tag.unique())
# if we need this as an explicit column
#subj_tags['subject_id'] = subj_tags.index
# likewise reading this matched files doesn't take long even though we have a for loop.
print("Reading master list of matched filenames %s..." % filename_master_list_filename)
matched_filenames = pd.read_csv(filename_master_list_filename)
print("Reading from list of possible matches to known pulsars %s..." % poss_match_file)
# ['Zooniverse name', 'HTRU-N name', 'Possible source']
possible_knowns = pd.read_csv(poss_match_file)
possible_knowns['is_poss_known'] = [True for q in possible_knowns['Possible source']]
# This section takes quite a while and it's because we have so many for loops, which I think is
# in part because reading out of a dict from a column in a DataFrame needs loops when done this way
# and in part because we were in a rush.
# I think it's possible we could pass this to a function and reshape things there, then return
# a set of new columns - but I didn't have time to figure that out under the deadlines we had.
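# (Untested sketch of what that refactor might look like: parse each JSON column once with .apply
#  and then pull fields out of the resulting Series of dicts, e.g.
#      meta = classifications.metadata.apply(json.loads)
#      classifications['started_at_str'] = meta.apply(lambda d: d['started_at'])
#  which at least avoids re-parsing and re-looping over the DataFrame for every derived column.)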
print("Making new columns and getting user labels...")
# first, extract the started_at and finished_at from the annotations column
classifications['meta_json'] = [json.loads(q) for q in classifications.metadata]
classifications['started_at_str'] = [q['started_at'] for q in classifications.meta_json]
classifications['finished_at_str'] = [q['finished_at'] for q in classifications.meta_json]
# we need to set up a new user id column that's login name if the classification is while logged in,
# session if not (right now "user_name" is login name or hashed IP and, well, read on...)
# in this particular run of this particular project, session is a better tracer of uniqueness than IP
# for anonymous users, because of a bug with some back-end stuff that someone else is fixing
# but we also want to keep the user name if it exists, so let's use this function
#classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications.iterrows()]
classifications['user_label'] = [get_alternate_sessioninfo(q) for q in classifications['user_name meta_json'.split()].iterrows()]
classifications['created_day'] = [q[:10] for q in classifications.created_at]
# Get subject info into a format we can actually use
classifications['subject_json'] = [json.loads(q) for q in classifications.subject_data]
# extract the subject ID because that's needed later
# Note the subject ID becomes the *index* of the dict, which is actually pretty strange versus
# everything else in the export, and I'd really rather it be included here as "subject_id":"1234567" etc.
#
# You can isolate the keys as a new column but then it's a DictKey type, but stringifying it adds
# all these other characters that you then have to take out. Thankfully all our subject IDs are numbers
# this is a little weird and there must be a better way but... it works
classifications['subject_id'] = [str(q.keys()).replace("dict_keys(['", "").replace("'])", '') for q in classifications.subject_json]
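# (A cleaner equivalent, assuming each subject_json dict has exactly one key, would be
#  [list(q.keys())[0] for q in classifications.subject_json] - noted here as an aside only;
#  the string-replace version above is what actually ran.)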
# extract retired status, though not sure we're actually going to use it.
# also, what a mess - you have to extract the subject ID first and then use it to call the subject_json. UGH
# update: we didn't use it and each of these lines takes ages, so commenting it out
#classifications['retired'] = [q[1].subject_json[q[1].subject_id]['retired'] for q in classifications.iterrows()]
# Get annotation info into a format we can actually use
# these annotations are just a single yes or no question, yay
classifications['annotation_json'] = [json.loads(q) for q in classifications.annotations]
classifications['pulsar_classification'] = [q[0]['value'] for q in classifications.annotation_json]
# create a weight parameter but set it to 1.0 for all classifications (unweighted) - may change later
classifications['weight'] = [1.0 for q in classifications.workflow_version]
# also create a count parameter, because at the time of writing this .aggregate('count') was sometimes off by 1
classifications['count'] = [1 for q in classifications.workflow_version]
#######################################################
# discard classifications not in the active workflow #
#######################################################
print("Picking classifications from the active workflow (id %d, version %d.*)" % (active_workflow_id, active_workflow_major))
# use any workflow consistent with this major version, e.g. 6.12 and 6.23 are both 6 so they're both ok
# also check it's the correct workflow id
the_active_workflow = [int(q) == active_workflow_major for q in classifications.workflow_version]
this_workflow = classifications.workflow_id == active_workflow_id
in_workflow = this_workflow & the_active_workflow
# note I haven't saved the full DF anywhere because of memory reasons, so if you're debugging:
# classifications_all = classifications.copy()
classifications = classifications[in_workflow]
print("Extracting filenames and subject types...")
# extract whether this is a known pulsar or a candidate that needs classifying - that info is in the
# "#Class" column in the subject metadata (where # means it can't be seen by classifiers).
# the options are "cand" for "candidate", "known" for known pulsar, "disc" for a pulsar that has been
# discovered by this team but is not yet published
# do this after you choose a workflow because #Class doesn't exist for the early subjects so it will break
# also don't send the entirety of classifications into the function, to save memory
#classifications['subject_type'] = [get_subject_type(q) for q in classifications.iterrows()]
#classifications['filename'] = [get_filename(q) for q in classifications.iterrows()]
classifications['subject_type'] = [get_subject_type(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
classifications['filename'] = [get_filename(q) for q in classifications['subject_id subject_json'.split()].iterrows()]
# Let me just pause a second to rant again about the fact that subject ID is the index of the subject_json.
# Because of that, because the top-level access to that was-json-now-a-dict requires the subject id rather than
# just being label:value pairs, I have to do an iterrows() and send part of the entire classifications DF into
# a loop so that I can simultaneously access each subject ID *and* the dict, rather than just accessing the
# info from the dict directly, which would be much faster.
# this might be useful for a sanity check later
# first_class_day = min(classifications.created_day).replace(' ', '')
# last_class_day = max(classifications.created_day).replace(' ', '')
# for some reason this is reporting last-classification dates that are days after the actual last
# classification. Not sure? Might be because this is a front-end reporting, so if someone has set
# their computer's time wrong we could get the wrong time here.
# could fix that by using created_at but ... I forgot.
last_class_time = max(classifications.finished_at_str)[:16].replace(' ', '_').replace('T', '_').replace(':', 'h')+"m"
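# e.g. a finished_at string of "2016-01-20T14:32:45.000Z" would become "2016-01-20_14h32m" here
# (the timestamp is invented; only the reshaping is the point).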
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Apply weighting function (or don't) #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
classifications['seed'] = [0 for q in classifications.weight]
classifications['is_gs'] = [0 for q in classifications.weight]
if apply_weight > 0:
print(" Computing user weights...")
# for now this is assuming all subjects marked as "known" or "disc" are pulsars
# and also "fake" are simulated pulsars
is_known = (classifications.subject_type == 'known') | (classifications.subject_type == 'disc') | (classifications.subject_type == 'fake')
#is_candidate = np.invert(is_known)
# if it's a non-gold-standard classification, mark it
classifications.loc[is_known, 'is_gs'] = 1
ok_incr = 1.0 # upweight if correct
oops_incr = -2.0 # downweight more if incorrect
# find the correct classifications of known pulsars
ok_class = (is_known) & (classifications.pulsar_classification == 'Yes')
# find the incorrect classifications of known pulsars
oops_class = (is_known) & (classifications.pulsar_classification == 'No')
# set the individual seeds
classifications.loc[ok_class, 'seed'] = ok_incr
classifications.loc[oops_class, 'seed'] = oops_incr
# then group classifications by user name, which will weight logged in as well as not-logged-in (the latter by session)
by_user = classifications.groupby('user_label')
# get the user's summed seed, which goes into the exponent for the weight
user_exp = by_user.seed.aggregate('sum')
# then set up the DF that will contain the weights etc, and fill it
user_weights = pd.DataFrame(user_exp)
user_weights.columns = ['seed']
user_weights['user_label'] = user_weights.index
user_weights['nclass_user'] = by_user['count'].aggregate('sum')
user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
user_weights['weight'] = [assign_weight(q, apply_weight) for q in user_weights.iterrows()]
#user_weights['weight'] = [assign_weight_old(q) for q in user_exp]
# if you want sum(unweighted classification count) == sum(weighted classification count), do this
if normalise_weights:
user_weights.weight *= float(len(classifications))/float(sum(user_weights.weight * user_weights.nclass_user))
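        # e.g. (made-up totals) if there are 100000 classifications but the weighted counts sum to
        # 80000, every user weight gets multiplied by 1.25 so the two totals match again.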
# weights are assigned, now need to match them up to the main classifications table
# making sure that this weight keeps the name 'weight' and the other gets renamed (suffixes flag)
    # if apply_weight == 0 then we never enter this branch and the old "weights" stay
    # as they are, i.e. == 1 uniformly.
classifications_old = classifications.copy()
classifications = pd.merge(classifications_old, user_weights, how='left',
on='user_label',
sort=False, suffixes=('_2', ''), copy=True)
else:
# just make a collated classification count array so we can print it to the screen
by_user = classifications.groupby('user_label')
user_exp = by_user.seed.aggregate('sum')
user_weights = pd.DataFrame(user_exp)
user_weights.columns = ['seed']
#user_weights['user_label'] = user_weights.index
user_weights['nclass_user'] = by_user['count'].aggregate('sum')
user_weights['n_gs'] = by_user['is_gs'].aggregate('sum')
# UNWEIGHTED
user_weights['weight'] = [1 for q in user_exp]
# grab basic stats
n_subj_tot = len(classifications.subject_id.unique())
by_subject = classifications.groupby('subject_id')
subj_class = by_subject.created_at.aggregate('count')
all_users = classifications.user_label.unique()
n_user_tot = len(all_users)
n_user_unreg = sum([q.startswith('not-logged-in-') for q in all_users])
# obviously if we didn't weight then we don't need to get stats on weights
if apply_weight > 0:
user_weight_mean = np.mean(user_weights.weight)
user_weight_median = np.median(user_weights.weight)
user_weight_25pct = np.percentile(user_weights.weight, 25)
user_weight_75pct = np.percentile(user_weights.weight, 75)
user_weight_min = min(user_weights.weight)
user_weight_max = max(user_weights.weight)
nclass_mean = np.mean(user_weights.nclass_user)
nclass_median = np.median(user_weights.nclass_user)
nclass_tot = len(classifications)
user_weights.sort_values(['nclass_user'], ascending=False, inplace=True)
# If you want to print out a file of classification counts per user, with colors for making a treemap
# honestly I'm not sure why you wouldn't want to print this, as it's very little extra effort
if counts_out == True:
print("Printing classification counts to %s..." % counts_out_file)
    user_weights['color'] = [randcolor(q) for q in user_weights.index]
    user_weights.to_csv(counts_out_file)
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Print out basic project info #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("%d classifications from %d users, %d registered and %d unregistered.\n" % (nclass_tot, n_user_tot, n_user_tot - n_user_unreg, n_user_unreg))
print("Mean n_class per user %.1f, median %.1f." % (nclass_mean, nclass_median))
if apply_weight > 0:
print("Mean user weight %.3f, median %.3f, with the middle 50 percent of users between %.3f and %.3f." % (user_weight_mean, user_weight_median, user_weight_25pct, user_weight_75pct))
print("The min user weight is %.3f and the max user weight is %.3f.\n" % (user_weight_min, user_weight_max))
cols_print = 'nclass_user weight'.split()
else:
cols_print = 'nclass_user'
# don't make this leaderboard public unless you want to gamify your users in ways we already know
# have unintended and sometimes negative consequences. This is just for your information.
print("Classification leaderboard:")
print(user_weights[cols_print].head(20))
print("Gini coefficient for project: %.3f" % gini(user_weight['nclass_user']))
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
#######################################################
# Aggregate classifications, unweighted and weighted #
#######################################################
## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## #
print("\nAggregating classifications...\n")
class_agg = by_subject['weight count pulsar_classification subject_type filename'.split()].apply(aggregate_class)
# really ought to replace all the NaNs with 0.0
#######################################################
# Write to files #
#######################################################
#
# add value-added columns
#
# let people look up the subject on Talk directly from the aggregated file
class_agg['link'] = ['https://www.zooniverse.org/projects/zooniverse/pulsar-hunters/talk/subjects/'+str(q) for q in class_agg.index]
# after we do the merges below the new indices might not be linked to the subject id, so save it explicitly
class_agg['subject_id'] = [str(q) for q in class_agg.index]
# match up all the ancillary file data. Maybe there's a faster way to do this than with a chain but meh,
# it's actually not *that* slow compared to the clusterf*ck of for loops in the column assignment part above
class_agg_old = class_agg.copy()
class_agg_interm = pd.merge(class_agg_old, subj_tags, how='left', left_index=True, right_index=True, sort=False, copy=True)
class_agg_interm2 = pd.merge(class_agg_interm, matched_filenames, how='left', left_on='filename', right_on='Pulsar Hunters File', sort=False, copy=True)
class_agg = pd.merge(class_agg_interm2, possible_knowns, how='left', left_on='filename', right_on='Zooniverse name', sort=False, copy=True)
# fill in the is_poss_known column with False where it is currently NaN
# currently it's either True or NaN -- with pd.isnull NaN becomes True and True becomes False, so invert that.
class_agg['is_poss_known'] = np.invert(pd.isnull(class_agg['is_poss_known']))
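# e.g. a column of [True, NaN, NaN] goes through pd.isnull to [False, True, True] and then
# np.invert back to [True, False, False], which is exactly the True/False flag we want.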
# make the list ranked by p_Yes_weight
class_agg.sort_values(['subject_type','p_Yes_weight'], ascending=False, inplace=True)
print("Writing aggregated output to file %s...\n" % outfile)
pd.DataFrame(class_agg).to_csv(outfile)
# Now make files ranked by p_Yes, one with all subjects classified and one with only candidates
# /Users/vrooje/anaconda/bin/ipython:1: FutureWarning: sort(columns=....) is deprecated, use sort_values(by=.....)
# #!/bin/bash /Users/vrooje/anaconda/bin/python.app
#class_agg.sort('p_Yes_weight', ascending=False, inplace=True)
class_agg.sort_values(['p_Yes_weight'], ascending=False, inplace=True)
# I'd rather note the last classification date than the date we happen to produce the file
# rightnow = datetime.datetime.now().strftime('%Y-%m-%d_%H:%M')
# rankfile_all = rankfile_stem + rightnow + ".csv"
rankfile_all = 'all_'+rankfile_stem + last_class_time + ".csv"
# there go those hard-coded columns again
rank_cols = ['subject_id', 'filename', 'p_Yes_weight', 'count_weighted', 'p_Yes', 'count_unweighted', 'subject_type', 'link', 'user_tag', 'HTRU-N File']
print("Writing full ranked list to file %s...\n" % rankfile_all)
# write just the weighted yes percentage, the weighted count, the subject type, and the link to the subject page
# the subject ID is the index so it will be written anyway
pd.DataFrame(class_agg[rank_cols]).to_csv(rankfile_all)
rankfile = 'cand_allsubj_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only ranked list to file %s...\n" % rankfile)
# also only include entries where there were at least 5 weighted votes tallied
# and only "cand" subject_type objects
classified_candidate = (class_agg.count_weighted > 5) & (class_agg.subject_type == 'cand')
pd.DataFrame(class_agg[rank_cols][classified_candidate]).to_csv(rankfile)
rankfile_unk = 'cand_'+rankfile_stem + last_class_time + ".csv"
print("Writing candidate-only, unknown-only ranked list to file %s...\n" % rankfile_unk)
# also only include entries where there were at least 5 weighted votes tallied
# and only "cand" subject_type objects
classified_unknown_candidate = (classified_candidate) & (np.invert(class_agg.is_poss_known))
pd.DataFrame(class_agg[rank_cols][classified_unknown_candidate]).to_csv(rankfile_unk)
# copy the candidate list into Google Drive so others can see it, overwriting previous versions
# Note: this is the way I instantly shared the new aggregated results with collaborators, because
# Google Drive automatically syncs with the online version. Dropbox would work too, etc. YMMV
cpfile = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot
print("Copying to Google Drive folder as %s..." % cpfile)
os.system("cp -f '%s' '%s'" % (rankfile, cpfile))
# and the unknown candidate sub-list
cpfile2 = "/Users/vrooje/Google Drive/pulsar_hunters_share/unknown_candidates_ranked_by_classifications_%dclass.csv" % nclass_tot
print("Copying to Google Drive folder as %s..." % cpfile2)
os.system("cp -f '%s' '%s'" % (rankfile_unk, cpfile2))
# and just for the record, all subjects.
cpfile3 = "/Users/vrooje/Google Drive/pulsar_hunters_share/all_subjects_ranked_by_classifications_%dclass.csv" % nclass_tot
print("... and %s" % cpfile3)
os.system("cp -f '%s' '%s'" % (rankfile_all, cpfile3))
#done.
| mit |
iulian787/spack | var/spack/repos/builtin/packages/candle-benchmarks/package.py | 2 | 1543 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class CandleBenchmarks(Package):
"""ECP-CANDLE Benchmarks"""
homepage = "https://github.com/ECP-CANDLE/Benchmarks"
url = "https://github.com/ECP-CANDLE/Benchmarks/archive/v0.1.tar.gz"
tags = ['proxy-app', 'ecp-proxy-app']
version('0.1', sha256='767f74f43ee3a5d4e0f26750f2a96b8433e25a9cd4f2d29938ac8acf263ab58d')
variant('mpi', default=True, description='Build with MPI support')
extends('python')
depends_on('python@2.7:')
depends_on('py-theano +gpu', type=('build', 'run'))
depends_on('py-keras', type=('build', 'run'))
depends_on('py-matplotlib +image@:2.2.3', type=('build', 'run'))
depends_on('py-tqdm', type=('build', 'run'))
depends_on('py-scikit-learn', type=('build', 'run'))
depends_on('opencv@3.2.0: +core +highgui +imgproc +jpeg +png +tiff +zlib +python -dnn ~eigen ~gtk')
depends_on('py-mdanalysis', type=('build', 'run'))
depends_on('py-mpi4py', when='+mpi', type=('build', 'run'))
depends_on('py-h5py~mpi', when='~mpi', type=('build', 'run'))
depends_on('py-h5py+mpi', when='+mpi', type=('build', 'run'))
depends_on('py-requests', type=('build', 'run'))
# see #3244, but use external for now
# depends_on('tensorflow')
def install(self, spec, prefix):
install_tree(self.stage.source_path, prefix.bin)
| lgpl-2.1 |
richjoyce/pandas_vectors | test_pv.py | 1 | 1154 | import pandas_vectors as pv
import pandas as pd
import numpy as np
import unittest
class PvTest(unittest.TestCase):
def test_indexer(self):
self.assertListEqual(pv.indexer('a'), ['a_x', 'a_y', 'a_z'])
self.assertListEqual(pv.indexer(['a']), ['a_x', 'a_y', 'a_z'])
self.assertListEqual(pv.indexer('abc'), ['abc_x', 'abc_y', 'abc_z'])
self.assertListEqual(pv.indexer(['abc']), ['abc_x', 'abc_y', 'abc_z'])
self.assertListEqual(pv.indexer(['abc','def']), ['abc_x', 'abc_y', 'abc_z', 'def_x', 'def_y', 'def_z'])
def test_vectornames(self):
pv.set_vectornames('pyr')
self.assertListEqual(pv.indexer('a'), ['a_p', 'a_y', 'a_r'])
pv.set_vectornames(['_l', '_m', '_n', '_o'])
self.assertListEqual(pv.indexer('a'), ['a_l', 'a_m', 'a_n', 'a_o'])
with pv.vectornames('xyz'):
self.assertListEqual(pv.indexer('a'), ['a_x', 'a_y', 'a_z'])
with pv.vectornames('xy'):
self.assertListEqual(pv.indexer('a'), ['a_x', 'a_y'])
self.assertListEqual(pv.indexer('a'), ['a_l', 'a_m', 'a_n', 'a_o'])
if __name__ == '__main__':
unittest.main()
| mit |
jblackburne/scikit-learn | sklearn/tree/export.py | 12 | 16020 | """
This module defines export functions for decision trees.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Trevor Stephens <trev.stephens@gmail.com>
# License: BSD 3 clause
import numpy as np
from ..externals import six
from . import _criterion
from . import _tree
def _color_brew(n):
"""Generate n colors with equally spaced hues.
Parameters
----------
n : int
The number of colors required.
Returns
-------
color_list : list, length n
List of n tuples of form (R, G, B) being the components of each color.
"""
color_list = []
# Initialize saturation & value; calculate chroma & value shift
s, v = 0.75, 0.9
c = s * v
m = v - c
for h in np.arange(25, 385, 360. / n).astype(int):
# Calculate some intermediate values
h_bar = h / 60.
x = c * (1 - abs((h_bar % 2) - 1))
# Initialize RGB with same hue & chroma as our color
rgb = [(c, x, 0),
(x, c, 0),
(0, c, x),
(0, x, c),
(x, 0, c),
(c, 0, x),
(c, x, 0)]
r, g, b = rgb[int(h_bar)]
# Shift the initial RGB values to match value and store
rgb = [(int(255 * (r + m))),
(int(255 * (g + m))),
(int(255 * (b + m)))]
color_list.append(rgb)
return color_list
def export_graphviz(decision_tree, out_file="tree.dot", max_depth=None,
feature_names=None, class_names=None, label='all',
filled=False, leaves_parallel=False, impurity=True,
node_ids=False, proportion=False, rotate=False,
rounded=False, special_characters=False):
"""Export a decision tree in DOT format.
This function generates a GraphViz representation of the decision tree,
which is then written into `out_file`. Once exported, graphical renderings
can be generated using, for example::
$ dot -Tps tree.dot -o tree.ps (PostScript format)
$ dot -Tpng tree.dot -o tree.png (PNG format)
The sample counts that are shown are weighted with any sample_weights that
might be present.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
decision_tree : decision tree classifier
The decision tree to be exported to GraphViz.
out_file : file object or string, optional (default="tree.dot")
Handle or name of the output file.
max_depth : int, optional (default=None)
The maximum depth of the representation. If None, the tree is fully
generated.
feature_names : list of strings, optional (default=None)
Names of each of the features.
class_names : list of strings, bool or None, optional (default=None)
Names of each of the target classes in ascending numerical order.
Only relevant for classification and not supported for multi-output.
If ``True``, shows a symbolic representation of the class name.
label : {'all', 'root', 'none'}, optional (default='all')
Whether to show informative labels for impurity, etc.
Options include 'all' to show at every node, 'root' to show only at
the top root node, or 'none' to not show at any node.
filled : bool, optional (default=False)
When set to ``True``, paint nodes to indicate majority class for
classification, extremity of values for regression, or purity of node
for multi-output.
leaves_parallel : bool, optional (default=False)
When set to ``True``, draw all leaf nodes at the bottom of the tree.
impurity : bool, optional (default=True)
When set to ``True``, show the impurity at each node.
node_ids : bool, optional (default=False)
When set to ``True``, show the ID number on each node.
proportion : bool, optional (default=False)
When set to ``True``, change the display of 'values' and/or 'samples'
to be proportions and percentages respectively.
rotate : bool, optional (default=False)
When set to ``True``, orient tree left to right rather than top-down.
rounded : bool, optional (default=False)
When set to ``True``, draw node boxes with rounded corners and use
Helvetica fonts instead of Times-Roman.
special_characters : bool, optional (default=False)
When set to ``False``, ignore special characters for PostScript
compatibility.
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn import tree
>>> clf = tree.DecisionTreeClassifier()
>>> iris = load_iris()
>>> clf = clf.fit(iris.data, iris.target)
>>> tree.export_graphviz(clf,
... out_file='tree.dot') # doctest: +SKIP
"""
def get_color(value):
# Find the appropriate color & intensity for a node
if colors['bounds'] is None:
# Classification tree
color = list(colors['rgb'][np.argmax(value)])
sorted_values = sorted(value, reverse=True)
if len(sorted_values) == 1:
alpha = 0
else:
alpha = int(np.round(255 * (sorted_values[0] - sorted_values[1]) /
(1 - sorted_values[1]), 0))
else:
# Regression tree or multi-output
color = list(colors['rgb'][0])
alpha = int(np.round(255 * ((value - colors['bounds'][0]) /
(colors['bounds'][1] -
colors['bounds'][0])), 0))
# Return html color code in #RRGGBBAA format
color.append(alpha)
hex_codes = [str(i) for i in range(10)]
hex_codes.extend(['a', 'b', 'c', 'd', 'e', 'f'])
color = [hex_codes[c // 16] + hex_codes[c % 16] for c in color]
return '#' + ''.join(color)
def node_to_str(tree, node_id, criterion):
# Generate the node content string
if tree.n_outputs == 1:
value = tree.value[node_id][0, :]
else:
value = tree.value[node_id]
# Should labels be shown?
labels = (label == 'root' and node_id == 0) or label == 'all'
# PostScript compatibility for special characters
if special_characters:
characters = ['#', '<SUB>', '</SUB>', '≤', '<br/>', '>']
node_string = '<'
else:
characters = ['#', '[', ']', '<=', '\\n', '"']
node_string = '"'
# Write node ID
if node_ids:
if labels:
node_string += 'node '
node_string += characters[0] + str(node_id) + characters[4]
# Write decision criteria
if tree.children_left[node_id] != _tree.TREE_LEAF:
# Always write node decision criteria, except for leaves
if feature_names is not None:
feature = feature_names[tree.feature[node_id]]
else:
feature = "X%s%s%s" % (characters[1],
tree.feature[node_id],
characters[2])
node_string += '%s %s %s%s' % (feature,
characters[3],
round(tree.threshold[node_id], 4),
characters[4])
# Write impurity
if impurity:
if isinstance(criterion, _criterion.FriedmanMSE):
criterion = "friedman_mse"
elif not isinstance(criterion, six.string_types):
criterion = "impurity"
if labels:
node_string += '%s = ' % criterion
node_string += (str(round(tree.impurity[node_id], 4)) +
characters[4])
# Write node sample count
if labels:
node_string += 'samples = '
if proportion:
percent = (100. * tree.n_node_samples[node_id] /
float(tree.n_node_samples[0]))
node_string += (str(round(percent, 1)) + '%' +
characters[4])
else:
node_string += (str(tree.n_node_samples[node_id]) +
characters[4])
# Write node class distribution / regression value
if proportion and tree.n_classes[0] != 1:
# For classification this will show the proportion of samples
value = value / tree.weighted_n_node_samples[node_id]
if labels:
node_string += 'value = '
if tree.n_classes[0] == 1:
# Regression
value_text = np.around(value, 4)
elif proportion:
# Classification
value_text = np.around(value, 2)
elif np.all(np.equal(np.mod(value, 1), 0)):
# Classification without floating-point weights
value_text = value.astype(int)
else:
# Classification with floating-point weights
value_text = np.around(value, 4)
# Strip whitespace
value_text = str(value_text.astype('S32')).replace("b'", "'")
value_text = value_text.replace("' '", ", ").replace("'", "")
if tree.n_classes[0] == 1 and tree.n_outputs == 1:
value_text = value_text.replace("[", "").replace("]", "")
value_text = value_text.replace("\n ", characters[4])
node_string += value_text + characters[4]
# Write node majority class
if (class_names is not None and
tree.n_classes[0] != 1 and
tree.n_outputs == 1):
# Only done for single-output classification trees
if labels:
node_string += 'class = '
if class_names is not True:
class_name = class_names[np.argmax(value)]
else:
class_name = "y%s%s%s" % (characters[1],
np.argmax(value),
characters[2])
node_string += class_name
# Clean up any trailing newlines
if node_string[-2:] == '\\n':
node_string = node_string[:-2]
if node_string[-5:] == '<br/>':
node_string = node_string[:-5]
return node_string + characters[5]
def recurse(tree, node_id, criterion, parent=None, depth=0):
if node_id == _tree.TREE_LEAF:
raise ValueError("Invalid node_id %s" % _tree.TREE_LEAF)
left_child = tree.children_left[node_id]
right_child = tree.children_right[node_id]
# Add node with description
if max_depth is None or depth <= max_depth:
# Collect ranks for 'leaf' option in plot_options
if left_child == _tree.TREE_LEAF:
ranks['leaves'].append(str(node_id))
elif str(depth) not in ranks:
ranks[str(depth)] = [str(node_id)]
else:
ranks[str(depth)].append(str(node_id))
out_file.write('%d [label=%s'
% (node_id,
node_to_str(tree, node_id, criterion)))
if filled:
# Fetch appropriate color for node
if 'rgb' not in colors:
# Initialize colors and bounds if required
colors['rgb'] = _color_brew(tree.n_classes[0])
if tree.n_outputs != 1:
# Find max and min impurities for multi-output
colors['bounds'] = (np.min(-tree.impurity),
np.max(-tree.impurity))
elif tree.n_classes[0] == 1 and len(np.unique(tree.value)) != 1:
# Find max and min values in leaf nodes for regression
colors['bounds'] = (np.min(tree.value),
np.max(tree.value))
if tree.n_outputs == 1:
node_val = (tree.value[node_id][0, :] /
tree.weighted_n_node_samples[node_id])
if tree.n_classes[0] == 1:
# Regression
node_val = tree.value[node_id][0, :]
else:
# If multi-output color node by impurity
node_val = -tree.impurity[node_id]
out_file.write(', fillcolor="%s"' % get_color(node_val))
out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d' % (parent, node_id))
if parent == 0:
# Draw True/False labels if parent is root node
angles = np.array([45, -45]) * ((rotate - .5) * -2)
out_file.write(' [labeldistance=2.5, labelangle=')
if node_id == 1:
out_file.write('%d, headlabel="True"]' % angles[0])
else:
out_file.write('%d, headlabel="False"]' % angles[1])
out_file.write(' ;\n')
if left_child != _tree.TREE_LEAF:
recurse(tree, left_child, criterion=criterion, parent=node_id,
depth=depth + 1)
recurse(tree, right_child, criterion=criterion, parent=node_id,
depth=depth + 1)
else:
ranks['leaves'].append(str(node_id))
out_file.write('%d [label="(...)"' % node_id)
if filled:
# color cropped nodes grey
out_file.write(', fillcolor="#C0C0C0"')
            out_file.write('] ;\n')
if parent is not None:
# Add edge to parent
out_file.write('%d -> %d ;\n' % (parent, node_id))
own_file = False
try:
if isinstance(out_file, six.string_types):
if six.PY3:
out_file = open(out_file, "w", encoding="utf-8")
else:
out_file = open(out_file, "wb")
own_file = True
# The depth of each node for plotting with 'leaf' option
ranks = {'leaves': []}
# The colors to render each node with
colors = {'bounds': None}
out_file.write('digraph Tree {\n')
# Specify node aesthetics
out_file.write('node [shape=box')
rounded_filled = []
if filled:
rounded_filled.append('filled')
if rounded:
rounded_filled.append('rounded')
if len(rounded_filled) > 0:
out_file.write(', style="%s", color="black"'
% ", ".join(rounded_filled))
if rounded:
out_file.write(', fontname=helvetica')
out_file.write('] ;\n')
# Specify graph & edge aesthetics
if leaves_parallel:
out_file.write('graph [ranksep=equally, splines=polyline] ;\n')
if rounded:
out_file.write('edge [fontname=helvetica] ;\n')
if rotate:
out_file.write('rankdir=LR ;\n')
# Now recurse the tree and add node & edge attributes
if isinstance(decision_tree, _tree.Tree):
recurse(decision_tree, 0, criterion="impurity")
else:
recurse(decision_tree.tree_, 0, criterion=decision_tree.criterion)
# If required, draw leaf nodes at same depth as each other
if leaves_parallel:
for rank in sorted(ranks):
out_file.write("{rank=same ; " +
"; ".join(r for r in ranks[rank]) + "} ;\n")
out_file.write("}")
finally:
if own_file:
out_file.close()
| bsd-3-clause |
toastedcornflakes/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
russel1237/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
anirudhjayaraman/scikit-learn | sklearn/semi_supervised/tests/test_label_propagation.py | 307 | 1974 | """ test the label propagation module """
import nose
import numpy as np
from sklearn.semi_supervised import label_propagation
from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
ESTIMATORS = [
(label_propagation.LabelPropagation, {'kernel': 'rbf'}),
(label_propagation.LabelPropagation, {'kernel': 'knn', 'n_neighbors': 2}),
(label_propagation.LabelSpreading, {'kernel': 'rbf'}),
(label_propagation.LabelSpreading, {'kernel': 'knn', 'n_neighbors': 2})
]
def test_fit_transduction():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
nose.tools.assert_equal(clf.transduction_[2], 1)
def test_distribution():
samples = [[1., 0.], [0., 1.], [1., 1.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
if parameters['kernel'] == 'knn':
continue # unstable test; changes in k-NN ordering break it
assert_array_almost_equal(clf.predict_proba([[1., 0.0]]),
np.array([[1., 0.]]), 2)
else:
assert_array_almost_equal(np.asarray(clf.label_distributions_[2]),
np.array([.5, .5]), 2)
def test_predict():
samples = [[1., 0.], [0., 2.], [1., 3.]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_equal(clf.predict([[0.5, 2.5]]), np.array([1]))
def test_predict_proba():
samples = [[1., 0.], [0., 1.], [1., 2.5]]
labels = [0, 1, -1]
for estimator, parameters in ESTIMATORS:
clf = estimator(**parameters).fit(samples, labels)
assert_array_almost_equal(clf.predict_proba([[1., 1.]]),
np.array([[0.5, 0.5]]))
| bsd-3-clause |
nicproulx/mne-python | mne/preprocessing/tests/test_xdawn.py | 3 | 7741 | # Authors: Alexandre Barachant <alexandre.barachant@gmail.com>
# Jean-Remi King <jeanremi.king@gmail.com>
#
# License: BSD (3-clause)
import numpy as np
import os.path as op
from nose.tools import assert_equal, assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_array_almost_equal
from mne import Epochs, read_events, pick_types, compute_raw_covariance
from mne.io import read_raw_fif
from mne.utils import requires_sklearn, run_tests_if_main
from mne.preprocessing.xdawn import Xdawn, _XdawnTransformer
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.2
event_id = dict(cond2=2, cond3=3)
def _get_data():
"""Get data."""
raw = read_raw_fif(raw_fname, verbose=False, preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False,
ecg=False, eog=False,
exclude='bads')[::8]
return raw, events, picks
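# Note: _get_data keeps only every 8th EEG channel (picks[::8]), so the tests
# below run on a small channel subset.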
def test_xdawn():
"""Test init of xdawn."""
# Init xdawn with good parameters
Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None)
# Init xdawn with bad parameters
assert_raises(ValueError, Xdawn, correct_overlap=42)
def test_xdawn_fit():
"""Test Xdawn fit."""
# Get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# =========== Basic Fit test =================
# Test base xdawn
xd = Xdawn(n_components=2, correct_overlap='auto')
xd.fit(epochs)
# With these parameters, the overlap correction must be False
assert_equal(xd.correct_overlap_, False)
# No overlap correction should give averaged evoked
evoked = epochs['cond2'].average()
assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
# ========== with signal cov provided ====================
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray
signal_cov = np.eye(len(picks))
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
assert_raises(ValueError, xd.fit, epochs)
# Provide another type
signal_cov = 42
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
assert_raises(ValueError, xd.fit, epochs)
# Fit with baseline correction and overlap correction should throw an
# error
# XXX This is a buggy test, the epochs here don't overlap
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=(None, 0), verbose=False)
xd = Xdawn(n_components=2, correct_overlap=True)
assert_raises(ValueError, xd.fit, epochs)
def test_xdawn_apply_transform():
"""Test Xdawn apply and transform."""
# Get data
raw, events, picks = _get_data()
raw.pick_types(eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
preload=True, baseline=None,
verbose=False)
n_components = 2
# Fit Xdawn
xd = Xdawn(n_components=n_components, correct_overlap=False)
xd.fit(epochs)
# Apply on different types of instances
for inst in [raw, epochs.average(), epochs]:
denoise = xd.apply(inst)
    # Applying to anything else should raise an error
assert_raises(ValueError, xd.apply, 42)
# Transform on epochs
xd.transform(epochs)
# Transform on ndarray
xd.transform(epochs._data)
    # Transform on something else
assert_raises(ValueError, xd.transform, 42)
# Check numerical results with shuffled epochs
np.random.seed(0) # random makes unstable linalg
idx = np.arange(len(epochs))
np.random.shuffle(idx)
xd.fit(epochs[idx])
denoise_shfl = xd.apply(epochs)
assert_array_almost_equal(denoise['cond2']._data,
denoise_shfl['cond2']._data)
@requires_sklearn
def test_xdawn_regularization():
"""Test Xdawn with regularization."""
# Get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# Test with overlapping events.
# modify events to simulate one overlap
events = epochs.events
sel = np.where(events[:, 2] == 2)[0][:2]
modified_event = events[sel[0]]
modified_event[0] += 1
epochs.events[sel[1]] = modified_event
# Fit and check that overlap was found and applied
xd = Xdawn(n_components=2, correct_overlap='auto', reg='oas')
xd.fit(epochs)
assert_equal(xd.correct_overlap_, True)
evoked = epochs['cond2'].average()
assert_true(np.sum(np.abs(evoked.data - xd.evokeds_['cond2'].data)))
# With covariance regularization
for reg in [.1, 0.1, 'ledoit_wolf', 'oas']:
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg=reg)
xd.fit(epochs)
# With bad shrinkage
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(picks)), reg=2)
assert_raises(ValueError, xd.fit, epochs)
@requires_sklearn
def test_XdawnTransformer():
"""Test _XdawnTransformer."""
# Get data
raw, events, picks = _get_data()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
X = epochs._data
y = epochs.events[:, -1]
# Fit
xdt = _XdawnTransformer()
xdt.fit(X, y)
assert_raises(ValueError, xdt.fit, X, y[1:])
assert_raises(ValueError, xdt.fit, 'foo')
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xdt = _XdawnTransformer(signal_cov=signal_cov)
xdt.fit(X, y)
# Provide ndarray
signal_cov = np.eye(len(picks))
xdt = _XdawnTransformer(signal_cov=signal_cov)
xdt.fit(X, y)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xdt = _XdawnTransformer(signal_cov=signal_cov)
assert_raises(ValueError, xdt.fit, X, y)
# Provide another type
signal_cov = 42
xdt = _XdawnTransformer(signal_cov=signal_cov)
assert_raises(ValueError, xdt.fit, X, y)
# Fit with y as None
xdt = _XdawnTransformer()
xdt.fit(X)
# Compare xdawn and _XdawnTransformer
xd = Xdawn(correct_overlap=False)
xd.fit(epochs)
xdt = _XdawnTransformer()
xdt.fit(X, y)
assert_array_almost_equal(xd.filters_['cond2'][:, :2],
xdt.filters_.reshape(2, 2, 8)[0].T)
# Transform testing
xdt.transform(X[1:, ...]) # different number of epochs
xdt.transform(X[:, :, 1:]) # different number of time
assert_raises(ValueError, xdt.transform, X[:, 1:, :])
Xt = xdt.transform(X)
assert_raises(ValueError, xdt.transform, 42)
# Inverse transform testing
Xinv = xdt.inverse_transform(Xt)
assert_equal(Xinv.shape, X.shape)
xdt.inverse_transform(Xt[1:, ...])
xdt.inverse_transform(Xt[:, :, 1:])
# should raise an error if not correct number of components
assert_raises(ValueError, xdt.inverse_transform, Xt[:, 1:, :])
assert_raises(ValueError, xdt.inverse_transform, 42)
run_tests_if_main()
| bsd-3-clause |
tedunderwood/GenreProject | python/workshop/bagofwords.py | 2 | 6896 | # bagofwords.py
#
# The BagOfWords class implements individual volumes as ordered
# lists of features.
#
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
def all_nonalphanumeric(astring):
nonalphanum = True
for character in astring:
if character.isalpha() or character.isdigit():
nonalphanum = False
break
return nonalphanum
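# Example: all_nonalphanumeric("...") returns True, while
# all_nonalphanumeric("don't") returns False because it contains letters.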
class BagOfWords:
def __init__(self, filepath, volID, include_punctuation):
''' Construct a BagOfWords.
volID is a string label for the volume.
include_punctuation is a boolean.
'''
self.volID = volID
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
self.rawcounts = dict()
self.totalcount = 0
for line in filelines:
line = line.rstrip()
fields = line.split('\t')
if len(fields) != 2:
print("Illegal line length in " + filepath)
print(line)
continue
else:
tokentype = fields[0]
count = fields[1]
try:
intcount = int(count)
if include_punctuation or not all_nonalphanumeric(tokentype):
self.rawcounts[tokentype] = intcount
self.totalcount += intcount
except ValueError:
print("Cannot parse count " + count + " as integer.")
continue
self.numrawcounts = len(self.rawcounts)
def selectfeatures(self, featurelist):
''' A BagOfWords is created with merely a dictionary of raw token counts.
One could call this a sparse table. It has no entries where features are
missing.
We need to organize these as an ordered series of features, which includes
only the features we have chosen to use in the current model, and has zeroes for
missing values.
'''
self.featurelist = featurelist
self.numfeatures = len(featurelist)
self.features = Series(self.rawcounts, index = featurelist, dtype = 'float64')
# Pandas has the nice feature of building a series from a dictionary if it's
# provided an index of values. So this effectively builds a series of entries
# ordered by the keys in 'featurelist,' with NaN in places where rawcounts
# had no corresponding key.
self.features[self.features.isnull()] = 0
# This replaces NaN with zero, since missing words are effectively words with
# count == 0.
def normalizefrequencies(self):
''' Simply divides all frequencies by the total token count for this volume.
'''
self.features = self.features / self.totalcount
def standardizefrequencies(self, standardizer):
''' Convert features to z-scores by centering them on the means and
scaling them by standard deviation.
standardizer = an object of class StandardizingVector, presumably created
either on the corpus that contains this volume, or on the training corpus
that created the model we are about to use on this volume.
'''
assert len(self.features) == len(standardizer.means)
self.features = (self.features - standardizer.means) / standardizer.stdevs
class StandardizingVector:
''' An object that computes the means and standard deviations of features
across a corpus of volumes. These statistics can then be used to standardize
the feature vectors in volumes.
'''
def __init__(self, listofvolumes, featurelist):
numvolumes = len(listofvolumes)
numfeatures = len(featurelist)
# First a simple sanity check. We are talking about volumes with
# the same number of features, right?
for avolume in listofvolumes:
assert avolume.numfeatures == numfeatures
# And how about a spot check to make sure the lists are really the same?
for ourfeature, itsfeature in zip(featurelist, listofvolumes[0].featurelist):
assert ourfeature == itsfeature
# Okay, we're good. Initialize some pandas series.
means = list()
stdevs = list()
for afeature in featurelist:
featuredistribution = np.zeros(numvolumes)
# For each feature, create an array of possible values by polling volumes.
for volidx, avolume in enumerate(listofvolumes):
featuredistribution[volidx] = avolume.features[afeature]
# Then calculate mean and standard deviation for this feature.
thismean = np.mean(featuredistribution)
thisstd = np.std(featuredistribution)
if thisstd == 0:
print("Problematic standard deviation of zero for feature " + afeature)
thisstd = 0.0000001
# Cheesy hack is my middle name.
means.append(thismean)
stdevs.append(thisstd)
self.means = Series(means, index = featurelist)
self.stdevs = Series(stdevs, index = featurelist)
self.features = featurelist
# Because we're going to need the list of features to apply this model
# to other volumes.
# Done.
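# Illustrative usage sketch (the paths and feature list are hypothetical,
# shown only to document the intended workflow):
#
#     features = ['the', 'of', 'and']
#     volumes = [BagOfWords('/path/vol1.tsv', 'vol1', include_punctuation=False),
#                BagOfWords('/path/vol2.tsv', 'vol2', include_punctuation=False)]
#     for vol in volumes:
#         vol.selectfeatures(features)
#         vol.normalizefrequencies()
#     standardizer = StandardizingVector(volumes, features)
#     for vol in volumes:
#         vol.standardizefrequencies(standardizer)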
class WordVector:
''' A WordVector is just like a BagOfWords, except that it has
a simpler constructor — it just accepts a list of tokens.
In Java, you could write multiple constructors for one class.
In Python, I'd have to rewrite the constructor inelegantly to make
these a single class. So. Two classes.
'''
def __init__(self, listofwords):
''' Construct a WordVector from a list.
'''
self.rawcounts = dict()
self.totalcount = 0
for word in listofwords:
self.totalcount += 1
if word in self.rawcounts:
self.rawcounts[word] += 1
else:
self.rawcounts[word] = 1
self.numrawcounts = len(self.rawcounts)
def selectfeatures(self, featurelist):
''' A WordVector is created with merely a dictionary of raw token counts.
One could call this a sparse table. It has no entries where features are
missing.
We need to organize these as an ordered series of features, which includes
only the features we have chosen to use in the current model, and has zeroes for
missing values.
'''
self.featurelist = featurelist
self.numfeatures = len(featurelist)
self.features = Series(self.rawcounts, index = featurelist, dtype = 'float64')
# Pandas has the nice feature of building a series from a dictionary if it's
# provided an index of values. So this effectively builds a series of entries
# ordered by the keys in 'featurelist,' with NaN in places where rawcounts
# had no corresponding key.
self.features[self.features.isnull()] = 0
# This replaces NaN with zero, since missing words are effectively words with
# count == 0.
def normalizefrequencies(self):
''' Simply divides all frequencies by the total token count for this volume.
'''
self.features = self.features / self.totalcount
def standardizefrequencies(self, standardizer):
''' Convert features to z-scores by centering them on the means and
scaling them by standard deviation.
standardizer = an object of class StandardizingVector, presumably created
either on the corpus that contains this volume, or on the training corpus
that created the model we are about to use on this volume.
'''
assert len(self.features) == len(standardizer.means)
self.features = (self.features - standardizer.means) / standardizer.stdevs
| mit |
YISION/yision.github.io | randomforest-决策树与随机森林/f.py | 1 | 2724 | #encoding=utf8
from sklearn.ensemble import RandomForestClassifier
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the dataset and set the response and explanatory variables #
data=pd.read_csv('data.csv')
#data=pd.read_csv('data_p.csv')
df=pd.DataFrame(data)
df=df.dropna()# drop rows with missing values #
y=df.BAD
x=pd.concat([df.LOAN,df.MORTDUE,df.VALUE,df.YOJ,df.DEROG,df.DELINQ,df.CLAGE,df.NINQ,df.CLNO],axis=1)
#x=pd.concat([df.LOAN,df.MORTDUE,df.VALUE,df.YOJ,df.DEROG,df.DELINQ,df.CLAGE,df.NINQ,df.CLNO,df.Resn_HomeImp,df.Resn_DebtCon,df.Job_Mgr,df.Job_Office,df.Job_Self,df.Job_ProfExe,df.Job_Sales,df.Job_Other],axis=1)
print df.head()
# Split into training and test sets #
from sklearn.cross_validation import train_test_split
x_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.4,random_state=1)
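# test_size=0.4 holds out 40% of the rows for testing; random_state=1 makes the split reproducible.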
# Decision tree #
from sklearn import tree, cross_validation
clf=tree.DecisionTreeClassifier(criterion='entropy',min_samples_leaf=8)
#print (clf)
print clf.fit(x_train,y_train)
# Evaluate on the test set #
print("Decision tree test accuracy: {:.16f}".format(clf.score(x_test,y_test)))
print("Decision tree training accuracy: {:.16f}".format(clf.score(x_train,y_train)))
# Plot feature importances #
feature_importance=clf.feature_importances_
important_features=x_train.columns.values[0::]
feature_importance=100.0*(feature_importance/feature_importance.max())
sorted_idx=np.argsort(feature_importance)[::-1]
pos=np.arange(sorted_idx.shape[0])+.5
plt.title('Feature Importance')
plt.barh(pos,feature_importance[sorted_idx[::-1]],color='r',align='center')
plt.yticks(pos,important_features)
plt.xlabel('Relative Importance')
plt.draw()
plt.show()
# Visualize the decision tree #
import pydot,StringIO
dot_data = StringIO.StringIO()
tree.export_graphviz(clf, out_file=dot_data, feature_names=['LOAN','MORTDUE','VALUE','YOJ','DEROG','DELINQ','CLAGE','NINQ','CLNO'])
dot_data.getvalue()
pydot.graph_from_dot_data(dot_data.getvalue())
graph = pydot.graph_from_dot_data(dot_data.getvalue())
graph.write_png('tree.png')
from IPython.core.display import Image
Image(filename='tree.png')
# Cross-validate the model #
score1=cross_validation.cross_val_score(clf, x, y,cv=10)
print score1
# Random forest #
clf2=RandomForestClassifier(n_estimators=1000,criterion='entropy',min_samples_leaf=8,random_state=1,n_jobs=5)
#print (clf2)
print clf2.fit(x_train,y_train)
# Evaluate on the test set #
print("Random forest test accuracy: {:.16f}".format(clf2.score(x_test,y_test)))
print("Random forest training accuracy: {:.16f}".format(clf2.score(x_train,y_train)))
# Compare the two models via cross-validation #
score2=cross_validation.cross_val_score(clf2,x,y,cv=10)
print score2
print ("Decision tree cross-validation:")
print score1.mean()
print ("Random forest cross-validation:")
print score2.mean()
| mit |
466152112/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 57 | 13752 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
    X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((rows.shape[0], X.shape[1]), dtype=np.float64)
assign_rows_csr(X, rows,
np.arange(out.shape[0], dtype=np.intp)[::-1], out)
assert_array_equal(out, X[rows].toarray()[::-1])
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
abandons/jieba | test/extract_topic.py | 65 | 1463 | import sys
sys.path.append("../")
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn import decomposition
import jieba
import time
import glob
import sys
import os
import random
if len(sys.argv)<2:
print("usage: extract_topic.py directory [n_topic] [n_top_words]")
sys.exit(0)
n_topic = 10
n_top_words = 25
if len(sys.argv)>2:
n_topic = int(sys.argv[2])
if len(sys.argv)>3:
n_top_words = int(sys.argv[3])
count_vect = CountVectorizer()
docs = []
pattern = os.path.join(sys.argv[1],"*.txt")
print("read "+pattern)
for f_name in glob.glob(pattern):
with open(f_name) as f:
print("read file:", f_name)
for line in f: #one line as a document
words = " ".join(jieba.cut(line))
docs.append(words)
random.shuffle(docs)
print("read done.")
print("transform")
counts = count_vect.fit_transform(docs)
tfidf = TfidfTransformer().fit_transform(counts)
print(tfidf.shape)
t0 = time.time()
print("training...")
nmf = decomposition.NMF(n_components=n_topic).fit(tfidf)
print("done in %0.3fs." % (time.time() - t0))
# Invert the vectorizer vocabulary to be able to print the top words of each topic
feature_names = count_vect.get_feature_names()
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print("")
| mit |
pair-code/lit | lit_nlp/examples/coref/datasets/winogender.py | 2 | 6390 | """Coreference version of the Winogender dataset.
Each instance has two edges, one between the pronoun and the occupation and one
between the pronoun and the participant. The pronoun is always span1.
There are 120 templates in the Winogender set, 60 coreferent with the
occupation, and 60 coreferent with the participant. Each is instantiated
six times: with and without "someone" substituting for the participant,
and with {male, female, neutral} pronouns, for a total of 720 examples.
Winogender repo: https://github.com/rudinger/winogender-schemas
Paper: Gender Bias in Coreference Resolution (Rudinger et al. 2018),
https://arxiv.org/pdf/1804.09301.pdf
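An illustrative instantiation (paraphrased, not read from the data files): a
template like
    "The technician told the customer that $NOM_PRONOUN could pay with cash."
yields, for feminine pronouns with someone=False,
    "The technician told the customer that she could pay with cash."
with one coreference edge for (technician, she) and one for (customer, she);
exactly one of the two edges is labeled 1, depending on the template's answer.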
"""
import enum
import os
from typing import Optional
from absl import logging
from lit_nlp.api import dataset as lit_dataset
from lit_nlp.api import dtypes as lit_dtypes
from lit_nlp.api import types as lit_types
import pandas as pd
import transformers # for file caching
EdgeLabel = lit_dtypes.EdgeLabel
DATA_ROOT = "https://raw.githubusercontent.com/rudinger/winogender-schemas/master/data/" # pylint: disable=line-too-long
def get_data(name):
"""Download data or return local cache path."""
url = os.path.join(DATA_ROOT, name)
logging.info("Winogender: retrieving data file %s", url)
return transformers.file_utils.cached_path(url)
## From gap-coreference/constants.py
class Gender(enum.Enum):
UNKNOWN = 0
MASCULINE = 1
FEMININE = 2
NOM = "$NOM_PRONOUN"
POSS = "$POSS_PRONOUN"
ACC = "$ACC_PRONOUN"
PRONOUN_MAP = {
Gender.FEMININE: {
NOM: "she",
POSS: "her",
ACC: "her"
},
Gender.MASCULINE: {
NOM: "he",
POSS: "his",
ACC: "him"
},
Gender.UNKNOWN: {
NOM: "they",
POSS: "their",
ACC: "them"
},
}
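# Example lookups: PRONOUN_MAP[Gender.FEMININE][NOM] == "she" and
# PRONOUN_MAP[Gender.UNKNOWN][POSS] == "their".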
ANSWER_VOCAB = ["occupation", "participant"]
PRONOUNS_BY_GENDER = {k: "/".join(PRONOUN_MAP[k].values()) for k in PRONOUN_MAP}
# Based on winogender-schemas/scripts/instantiate.py, but adapted to LIT format.
def generate_instance(occupation,
participant,
answer,
sentence,
gender=Gender.UNKNOWN,
someone=False):
"""Generate a Winogender example from a template row."""
toks = sentence.split(" ")
part_index = toks.index("$PARTICIPANT")
if not someone:
# we are using the instantiated participant,
# e.g. "client", "patient", "customer",...
toks[part_index] = participant
else: # we are using the bleached NP "someone" for the other participant
# first, remove the token that precedes $PARTICIPANT, i.e. "the"
toks = toks[:part_index - 1] + toks[part_index:]
# recompute participant index (it should be part_index - 1)
part_index = toks.index("$PARTICIPANT")
toks[part_index] = "Someone" if part_index == 0 else "someone"
# Make sure we do this /after/ substituting "someone",
# since that may change indices.
occ_index = toks.index("$OCCUPATION")
# This should always pass on the regular Winogender dataset.
assert " " not in occupation, "Occupation must be single-token."
toks[occ_index] = occupation
pronoun_idx = None
gendered_toks = []
for i, t in enumerate(toks):
sub = PRONOUN_MAP[gender].get(t, t)
if sub != t:
pronoun_idx = i
gendered_toks.append(sub)
# NOM, POSS, ACC
pronoun_type = toks[pronoun_idx][1:].replace("_PRONOUN", "")
# Process text for fluency
text = " ".join(gendered_toks)
text = text.replace("they was", "they were")
text = text.replace("They was", "They were")
record = {"text": text, "tokens": text.split()}
t0 = EdgeLabel(
span1=(occ_index, occ_index + 1),
span2=(pronoun_idx, pronoun_idx + 1),
label=int(1 if answer == 0 else 0))
t1 = EdgeLabel(
span1=(part_index, part_index + 1),
span2=(pronoun_idx, pronoun_idx + 1),
label=int(1 if answer == 1 else 0))
record["coref"] = [t0, t1]
record.update({
"occupation": occupation,
"participant": participant,
"answer": ANSWER_VOCAB[answer],
"someone": str(someone),
"pronouns": PRONOUNS_BY_GENDER[gender],
"pronoun_type": pronoun_type,
"gender": gender.name,
})
return record
class WinogenderDataset(lit_dataset.Dataset):
"""Coreference on Winogender schemas (Rudinger et al. 2018)."""
# These should match the args to generate_instance()
TSV_COLUMN_NAMES = ["occupation", "participant", "answer", "sentence"]
def __init__(self,
templates_path: Optional[str] = None,
occupation_stats_path: Optional[str] = None):
templates_path = templates_path or get_data("templates.tsv")
occupation_stats_path = occupation_stats_path or get_data(
"occupations-stats.tsv")
# Load templates and make a DataFrame.
with open(templates_path) as fd:
self.templates_df = pd.read_csv(
fd, sep="\t", header=0, names=self.TSV_COLUMN_NAMES)
    # Load occupation stats.
with open(occupation_stats_path) as fd:
self.occupation_df = pd.read_csv(fd, sep="\t").set_index("occupation")
# Make examples for each {someone} x {gender} x {template}
self._examples = []
for _, row in self.templates_df.iterrows():
for someone in {False, True}:
for gender in Gender:
r = generate_instance(someone=someone, gender=gender, **row)
r["pf_bls"] = (
self.occupation_df.bls_pct_female[r["occupation"]] / 100.0)
self._examples.append(r)
def spec(self):
return {
"text":
lit_types.TextSegment(),
"tokens":
lit_types.Tokens(parent="text"),
"coref":
lit_types.EdgeLabels(align="tokens"),
# Metadata fields for filtering and analysis.
"occupation":
lit_types.CategoryLabel(),
"participant":
lit_types.CategoryLabel(),
"answer":
lit_types.CategoryLabel(vocab=ANSWER_VOCAB),
"someone":
lit_types.CategoryLabel(vocab=["True", "False"]),
"pronouns":
lit_types.CategoryLabel(vocab=list(PRONOUNS_BY_GENDER.values())),
"pronoun_type":
lit_types.CategoryLabel(vocab=["NOM", "POSS", "ACC"]),
"gender":
lit_types.CategoryLabel(vocab=[g.name for g in Gender]),
"pf_bls":
lit_types.Scalar(),
}
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/series/test_indexing.py | 3 | 88164 | # coding=utf-8
# pylint: disable-msg=E1101,W0612
import pytest
from datetime import datetime, timedelta
from numpy import nan
import numpy as np
import pandas as pd
import pandas._libs.index as _index
from pandas.core.dtypes.common import is_integer, is_scalar
from pandas import (Index, Series, DataFrame, isnull,
date_range, NaT, MultiIndex,
Timestamp, DatetimeIndex, Timedelta)
from pandas.core.indexing import IndexingError
from pandas.tseries.offsets import BDay
from pandas._libs import tslib, lib
from pandas.compat import lrange, range
from pandas import compat
from pandas.util.testing import (slow,
assert_series_equal,
assert_almost_equal,
assert_frame_equal)
import pandas.util.testing as tm
from pandas.tests.series.common import TestData
JOIN_TYPES = ['inner', 'outer', 'left', 'right']
class TestSeriesIndexing(TestData):
def test_get(self):
# GH 6383
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56, 45,
51, 39, 55, 43, 54, 52, 51, 54]))
result = s.get(25, 0)
expected = 0
assert result == expected
s = Series(np.array([43, 48, 60, 48, 50, 51, 50, 45, 57, 48, 56,
45, 51, 39, 55, 43, 54, 52, 51, 54]),
index=pd.Float64Index(
[25.0, 36.0, 49.0, 64.0, 81.0, 100.0,
121.0, 144.0, 169.0, 196.0, 1225.0,
1296.0, 1369.0, 1444.0, 1521.0, 1600.0,
1681.0, 1764.0, 1849.0, 1936.0],
dtype='object'))
result = s.get(25, 0)
expected = 43
assert result == expected
# GH 7407
# with a boolean accessor
df = pd.DataFrame({'i': [0] * 3, 'b': [False] * 3})
vc = df.i.value_counts()
result = vc.get(99, default='Missing')
assert result == 'Missing'
vc = df.b.value_counts()
result = vc.get(False, default='Missing')
assert result == 3
result = vc.get(True, default='Missing')
assert result == 'Missing'
def test_get_nan(self):
# GH 8569
s = pd.Float64Index(range(10)).to_series()
assert s.get(np.nan) is None
assert s.get(np.nan, default='Missing') == 'Missing'
# ensure that fixing the above hasn't broken get
# with multiple elements
idx = [20, 30]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
idx = [np.nan, np.nan]
assert_series_equal(s.get(idx),
Series([np.nan] * 2, index=idx))
def test_delitem(self):
# GH 5542
# should delete the item inplace
s = Series(lrange(5))
del s[0]
expected = Series(lrange(1, 5), index=lrange(1, 5))
assert_series_equal(s, expected)
del s[1]
expected = Series(lrange(2, 5), index=lrange(2, 5))
assert_series_equal(s, expected)
# empty
s = Series()
def f():
del s[0]
pytest.raises(KeyError, f)
# only 1 left, del, add, del
s = Series(1)
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
s[0] = 1
assert_series_equal(s, Series(1))
del s[0]
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='int64')))
# Index(dtype=object)
s = Series(1, index=['a'])
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
s['a'] = 1
assert_series_equal(s, Series(1, index=['a']))
del s['a']
assert_series_equal(s, Series(dtype='int64', index=Index(
[], dtype='object')))
def test_getitem_setitem_ellipsis(self):
s = Series(np.random.randn(10))
np.fix(s)
result = s[...]
assert_series_equal(result, s)
s[...] = 5
assert (result == 5).all()
def test_getitem_negative_out_of_bounds(self):
s = Series(tm.rands_array(5, 10), index=tm.rands_array(10, 10))
pytest.raises(IndexError, s.__getitem__, -11)
pytest.raises(IndexError, s.__setitem__, -11, 'foo')
def test_pop(self):
# GH 6600
df = DataFrame({'A': 0, 'B': np.arange(5, dtype='int64'), 'C': 0, })
k = df.iloc[4]
result = k.pop('B')
assert result == 4
expected = Series([0, 0], index=['A', 'C'], name=4)
assert_series_equal(k, expected)
def test_getitem_get(self):
idx1 = self.series.index[5]
idx2 = self.objSeries.index[5]
assert self.series[idx1] == self.series.get(idx1)
assert self.objSeries[idx2] == self.objSeries.get(idx2)
assert self.series[idx1] == self.series[5]
assert self.objSeries[idx2] == self.objSeries[5]
assert self.series.get(-1) == self.series.get(self.series.index[-1])
assert self.series[5] == self.series.get(self.series.index[5])
# missing
d = self.ts.index[0] - BDay()
pytest.raises(KeyError, self.ts.__getitem__, d)
# None
# GH 5652
for s in [Series(), Series(index=list('abc'))]:
result = s.get(None)
assert result is None
def test_iloc(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
for i in range(len(s)):
result = s.iloc[i]
exp = s[s.index[i]]
assert_almost_equal(result, exp)
# pass a slice
result = s.iloc[slice(1, 3)]
expected = s.loc[2:4]
assert_series_equal(result, expected)
# test slice is a view
result[:] = 0
assert (s[1:3] == 0).all()
# list of integers
result = s.iloc[[0, 2, 3, 4, 5]]
expected = s.reindex(s.index[[0, 2, 3, 4, 5]])
assert_series_equal(result, expected)
def test_iloc_nonunique(self):
s = Series([0, 1, 2], index=[0, 1, 0])
assert s.iloc[2] == 2
def test_getitem_regression(self):
s = Series(lrange(5), index=lrange(5))
result = s[lrange(5)]
assert_series_equal(result, s)
def test_getitem_setitem_slice_bug(self):
s = Series(lrange(10), lrange(10))
result = s[-12:]
assert_series_equal(result, s)
result = s[-7:]
assert_series_equal(result, s[3:])
result = s[:-12]
assert_series_equal(result, s[:0])
s = Series(lrange(10), lrange(10))
s[-12:] = 0
assert (s == 0).all()
s[:-12] = 5
assert (s == 0).all()
def test_getitem_int64(self):
idx = np.int64(5)
assert self.ts[idx] == self.ts[5]
def test_getitem_fancy(self):
slice1 = self.series[[1, 2, 3]]
slice2 = self.objSeries[[1, 2, 3]]
assert self.series.index[2] == slice1.index[1]
assert self.objSeries.index[2] == slice2.index[1]
assert self.series[2] == slice1[1]
assert self.objSeries[2] == slice2[1]
def test_getitem_boolean(self):
s = self.series
mask = s > s.median()
# passing list is OK
result = s[list(mask)]
expected = s[mask]
assert_series_equal(result, expected)
tm.assert_index_equal(result.index, s.index[mask])
def test_getitem_boolean_empty(self):
s = Series([], dtype=np.int64)
s.index.name = 'index_name'
s = s[s.isnull()]
assert s.index.name == 'index_name'
assert s.dtype == np.int64
# GH5877
# indexing with empty series
s = Series(['A', 'B'])
expected = Series(np.nan, index=['C'], dtype=object)
result = s[Series(['C'], dtype=object)]
assert_series_equal(result, expected)
s = Series(['A', 'B'])
expected = Series(dtype=object, index=Index([], dtype='int64'))
result = s[Series([], dtype=object)]
assert_series_equal(result, expected)
# invalid because of the boolean indexer
# that's empty or not-aligned
def f():
s[Series([], dtype=bool)]
pytest.raises(IndexingError, f)
def f():
s[Series([True], dtype=bool)]
pytest.raises(IndexingError, f)
def test_getitem_generator(self):
gen = (x > 0 for x in self.series)
result = self.series[gen]
result2 = self.series[iter(self.series > 0)]
expected = self.series[self.series > 0]
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
def test_type_promotion(self):
# GH12599
s = pd.Series()
s["a"] = pd.Timestamp("2016-01-01")
s["b"] = 3.0
s["c"] = "foo"
expected = Series([pd.Timestamp("2016-01-01"), 3.0, "foo"],
index=["a", "b", "c"])
assert_series_equal(s, expected)
def test_getitem_boolean_object(self):
# using column from DataFrame
s = self.series
mask = s > s.median()
omask = mask.astype(object)
# getitem
result = s[omask]
expected = s[mask]
assert_series_equal(result, expected)
# setitem
s2 = s.copy()
cop = s.copy()
cop[omask] = 5
s2[mask] = 5
assert_series_equal(cop, s2)
# nans raise exception
omask[5:10] = np.nan
pytest.raises(Exception, s.__getitem__, omask)
pytest.raises(Exception, s.__setitem__, omask, 5)
def test_getitem_setitem_boolean_corner(self):
ts = self.ts
mask_shifted = ts.shift(1, freq=BDay()) > ts.median()
# these used to raise...??
pytest.raises(Exception, ts.__getitem__, mask_shifted)
pytest.raises(Exception, ts.__setitem__, mask_shifted, 1)
# ts[mask_shifted]
# ts[mask_shifted] = 1
pytest.raises(Exception, ts.loc.__getitem__, mask_shifted)
pytest.raises(Exception, ts.loc.__setitem__, mask_shifted, 1)
# ts.loc[mask_shifted]
# ts.loc[mask_shifted] = 2
def test_getitem_setitem_slice_integers(self):
s = Series(np.random.randn(8), index=[2, 4, 6, 8, 10, 12, 14, 16])
result = s[:4]
expected = s.reindex([2, 4, 6, 8])
assert_series_equal(result, expected)
s[:4] = 0
assert (s[:4] == 0).all()
assert not (s[4:] == 0).any()
def test_getitem_setitem_datetime_tz_pytz(self):
tm._skip_if_no_pytz()
from pytz import timezone as tz
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
# comparison dates with datetime MUST be localized!
date = tz('US/Central').localize(datetime(1990, 1, 1, 3))
result[date] = 0
result[date] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetime_tz_dateutil(self):
tm._skip_if_no_dateutil()
from dateutil.tz import tzutc
from pandas._libs.tslib import _dateutil_gettz as gettz
tz = lambda x: tzutc() if x == 'UTC' else gettz(
x) # handle special case for utc in dateutil
from pandas import date_range
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H',
tz='America/New_York')
ts = Series(np.random.randn(N), index=rng)
# also test Timestamp tz handling, GH #2789
result = ts.copy()
result["1990-01-01 09:00:00+00:00"] = 0
result["1990-01-01 09:00:00+00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result["1990-01-01 03:00:00-06:00"] = 0
result["1990-01-01 03:00:00-06:00"] = ts[4]
assert_series_equal(result, ts)
# repeat with datetimes
result = ts.copy()
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = 0
result[datetime(1990, 1, 1, 9, tzinfo=tz('UTC'))] = ts[4]
assert_series_equal(result, ts)
result = ts.copy()
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = 0
result[datetime(1990, 1, 1, 3, tzinfo=tz('America/Chicago'))] = ts[4]
assert_series_equal(result, ts)
def test_getitem_setitem_datetimeindex(self):
N = 50
# testing with timezone, GH #2785
rng = date_range('1/1/1990', periods=N, freq='H', tz='US/Eastern')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04:00:00"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04:00:00"] = 0
result["1990-01-01 04:00:00"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04:00:00":"1990-01-01 07:00:00"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = 0
result["1990-01-01 04:00:00":"1990-01-01 07:00:00"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04:00:00"
rb = "1990-01-01 07:00:00"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# repeat all the above with naive datetimes
result = ts[datetime(1990, 1, 1, 4)]
expected = ts[4]
assert result == expected
result = ts.copy()
result[datetime(1990, 1, 1, 4)] = 0
result[datetime(1990, 1, 1, 4)] = ts[4]
assert_series_equal(result, ts)
result = ts[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = 0
result[datetime(1990, 1, 1, 4):datetime(1990, 1, 1, 7)] = ts[4:8]
assert_series_equal(result, ts)
lb = datetime(1990, 1, 1, 4)
rb = datetime(1990, 1, 1, 7)
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
# also test partial date slicing
result = ts["1990-01-02"]
expected = ts[24:48]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-02"] = 0
result["1990-01-02"] = ts[24:48]
assert_series_equal(result, ts)
def test_getitem_setitem_periodindex(self):
from pandas import period_range
N = 50
rng = period_range('1/1/1990', periods=N, freq='H')
ts = Series(np.random.randn(N), index=rng)
result = ts["1990-01-01 04"]
expected = ts[4]
assert result == expected
result = ts.copy()
result["1990-01-01 04"] = 0
result["1990-01-01 04"] = ts[4]
assert_series_equal(result, ts)
result = ts["1990-01-01 04":"1990-01-01 07"]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result["1990-01-01 04":"1990-01-01 07"] = 0
result["1990-01-01 04":"1990-01-01 07"] = ts[4:8]
assert_series_equal(result, ts)
lb = "1990-01-01 04"
rb = "1990-01-01 07"
result = ts[(ts.index >= lb) & (ts.index <= rb)]
expected = ts[4:8]
assert_series_equal(result, expected)
# GH 2782
result = ts[ts.index[4]]
expected = ts[4]
assert result == expected
result = ts[ts.index[4:8]]
expected = ts[4:8]
assert_series_equal(result, expected)
result = ts.copy()
result[ts.index[4:8]] = 0
result[4:8] = ts[4:8]
assert_series_equal(result, ts)
def test_getitem_median_slice_bug(self):
index = date_range('20090415', '20090519', freq='2B')
s = Series(np.random.randn(13), index=index)
indexer = [slice(6, 7, None)]
result = s[indexer]
expected = s[indexer[0]]
assert_series_equal(result, expected)
def test_getitem_out_of_bounds(self):
# don't segfault, GH #495
pytest.raises(IndexError, self.ts.__getitem__, len(self.ts))
# GH #917
s = Series([])
pytest.raises(IndexError, s.__getitem__, -1)
def test_getitem_setitem_integers(self):
# caused bug without test
s = Series([1, 2, 3], ['a', 'b', 'c'])
assert s.iloc[0] == s['a']
s.iloc[0] = 5
tm.assert_almost_equal(s['a'], 5)
def test_getitem_box_float64(self):
value = self.ts[5]
assert isinstance(value, np.float64)
def test_getitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
pytest.raises(KeyError, s.__getitem__, 1)
pytest.raises(KeyError, s.loc.__getitem__, 1)
def test_getitem_unordered_dup(self):
obj = Series(lrange(5), index=['c', 'a', 'a', 'b', 'b'])
assert is_scalar(obj['c'])
assert obj['c'] == 0
def test_getitem_dups_with_missing(self):
# breaks reindex, so need to use .loc internally
# GH 4246
s = Series([1, 2, 3, 4], ['foo', 'bar', 'foo', 'bah'])
expected = s.loc[['foo', 'bar', 'bah', 'bam']]
result = s[['foo', 'bar', 'bah', 'bam']]
assert_series_equal(result, expected)
def test_getitem_dups(self):
s = Series(range(5), index=['A', 'A', 'B', 'C', 'C'], dtype=np.int64)
expected = Series([3, 4], index=['C', 'C'], dtype=np.int64)
result = s['C']
assert_series_equal(result, expected)
def test_getitem_dataframe(self):
rng = list(range(10))
s = pd.Series(10, index=rng)
df = pd.DataFrame(rng, index=rng)
pytest.raises(TypeError, s.__getitem__, df > 5)
def test_getitem_callable(self):
# GH 12533
s = pd.Series(4, index=list('ABCD'))
result = s[lambda x: 'A']
assert result == s.loc['A']
result = s[lambda x: ['A', 'B']]
tm.assert_series_equal(result, s.loc[['A', 'B']])
result = s[lambda x: [True, False, True, True]]
tm.assert_series_equal(result, s.iloc[[0, 2, 3]])
def test_setitem_ambiguous_keyerror(self):
s = Series(lrange(10), index=lrange(0, 20, 2))
# equivalent of an append
s2 = s.copy()
s2[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
s2 = s.copy()
s2.loc[1] = 5
expected = s.append(Series([5], index=[1]))
assert_series_equal(s2, expected)
def test_setitem_float_labels(self):
# note labels are floats
s = Series(['a', 'b', 'c'], index=[0, 0.5, 1])
tmp = s.copy()
s.loc[1] = 'zoo'
tmp.iloc[2] = 'zoo'
assert_series_equal(s, tmp)
def test_setitem_callable(self):
# GH 12533
s = pd.Series([1, 2, 3, 4], index=list('ABCD'))
s[lambda x: 'A'] = -1
tm.assert_series_equal(s, pd.Series([-1, 2, 3, 4], index=list('ABCD')))
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
s = pd.Series([1, 2, -1, 4])
s[s < 0] = inc
expected = pd.Series([1, 2, inc, 4])
tm.assert_series_equal(s, expected)
def test_slice(self):
numSlice = self.series[10:20]
numSliceEnd = self.series[-10:]
objSlice = self.objSeries[10:20]
assert self.series.index[9] not in numSlice.index
assert self.objSeries.index[9] not in objSlice.index
assert len(numSlice) == len(numSlice.index)
assert self.series[numSlice.index[0]] == numSlice[numSlice.index[0]]
assert numSlice.index[1] == self.series.index[11]
assert tm.equalContents(numSliceEnd, np.array(self.series)[-10:])
# Test return view.
sl = self.series[10:20]
sl[:] = 0
assert (self.series[10:20] == 0).all()
def test_slice_can_reorder_not_uniquely_indexed(self):
s = Series(1, index=['a', 'a', 'b', 'b', 'c'])
s[::-1] # it works!
def test_slice_float_get_set(self):
pytest.raises(TypeError, lambda: self.ts[4.0:10.0])
def f():
self.ts[4.0:10.0] = 0
pytest.raises(TypeError, f)
pytest.raises(TypeError, self.ts.__getitem__, slice(4.5, 10.0))
pytest.raises(TypeError, self.ts.__setitem__, slice(4.5, 10.0), 0)
def test_slice_floats2(self):
s = Series(np.random.rand(10), index=np.arange(10, 20, dtype=float))
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
i = np.arange(10, 20, dtype=float)
i[2] = 12.2
s.index = i
assert len(s.loc[12.0:]) == 8
assert len(s.loc[12.5:]) == 7
def test_slice_float64(self):
values = np.arange(10., 50., 2)
index = Index(values)
start, end = values[[5, 15]]
s = Series(np.random.randn(20), index=index)
result = s[start:end]
expected = s.iloc[5:16]
assert_series_equal(result, expected)
result = s.loc[start:end]
assert_series_equal(result, expected)
df = DataFrame(np.random.randn(20, 3), index=index)
result = df[start:end]
expected = df.iloc[5:16]
tm.assert_frame_equal(result, expected)
result = df.loc[start:end]
tm.assert_frame_equal(result, expected)
def test_setitem(self):
self.ts[self.ts.index[5]] = np.NaN
self.ts[[1, 2, 17]] = np.NaN
self.ts[6] = np.NaN
assert np.isnan(self.ts[6])
assert np.isnan(self.ts[2])
self.ts[np.isnan(self.ts)] = 5
assert not np.isnan(self.ts[2])
# caught this bug when writing tests
series = Series(tm.makeIntIndex(20).astype(float),
index=tm.makeIntIndex(20))
series[::2] = 0
assert (series[::2] == 0).all()
# set item that's not contained
s = self.series.copy()
s['foobar'] = 1
app = Series([1], index=['foobar'], name='series')
expected = self.series.append(app)
assert_series_equal(s, expected)
# Test for issue #10193
key = pd.Timestamp('2012-01-01')
series = pd.Series()
series[key] = 47
expected = pd.Series(47, [key])
assert_series_equal(series, expected)
series = pd.Series([], pd.DatetimeIndex([], freq='D'))
series[key] = 47
expected = pd.Series(47, pd.DatetimeIndex([key], freq='D'))
assert_series_equal(series, expected)
def test_setitem_dtypes(self):
# change dtypes
# GH 4463
expected = Series([np.nan, 2, 3])
s = Series([1, 2, 3])
s.iloc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s.loc[0] = np.nan
assert_series_equal(s, expected)
s = Series([1, 2, 3])
s[0] = np.nan
assert_series_equal(s, expected)
s = Series([False])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan]))
s = Series([False, True])
s.loc[0] = np.nan
assert_series_equal(s, Series([np.nan, 1.0]))
def test_set_value(self):
idx = self.ts.index[10]
res = self.ts.set_value(idx, 0)
assert res is self.ts
assert self.ts[idx] == 0
# equiv
s = self.series.copy()
res = s.set_value('foobar', 0)
assert res is s
assert res.index[-1] == 'foobar'
assert res['foobar'] == 0
s = self.series.copy()
s.loc['foobar'] = 0
assert s.index[-1] == 'foobar'
assert s['foobar'] == 0
def test_setslice(self):
sl = self.ts[5:20]
assert len(sl) == len(sl.index)
assert sl.index.is_unique
def test_basic_getitem_setitem_corner(self):
# invalid tuples, e.g. self.ts[:, None] vs. self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2]
with tm.assert_raises_regex(ValueError, 'tuple-index'):
self.ts[:, 2] = 2
# weird lists. [slice(0, 5)] will work but not two slices
result = self.ts[[slice(None, 5)]]
expected = self.ts[:5]
assert_series_equal(result, expected)
# OK
pytest.raises(Exception, self.ts.__getitem__,
[5, slice(None, None)])
pytest.raises(Exception, self.ts.__setitem__,
[5, slice(None, None)], 2)
def test_basic_getitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
result = self.ts[indices]
expected = self.ts.reindex(indices)
assert_series_equal(result, expected)
result = self.ts[indices[0]:indices[2]]
expected = self.ts.loc[indices[0]:indices[2]]
assert_series_equal(result, expected)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 2, 5, 7, 8]
arr_inds = np.array([0, 2, 5, 7, 8])
result = s[inds]
expected = s.reindex(inds)
assert_series_equal(result, expected)
result = s[arr_inds]
expected = s.reindex(arr_inds)
assert_series_equal(result, expected)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
expected = Timestamp('2011-01-01', tz='US/Eastern')
result = s.loc['a']
assert result == expected
result = s.iloc[0]
assert result == expected
result = s['a']
assert result == expected
def test_basic_setitem_with_labels(self):
indices = self.ts.index[[5, 10, 15]]
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices] = 0
exp.loc[indices] = 0
assert_series_equal(cp, exp)
cp = self.ts.copy()
exp = self.ts.copy()
cp[indices[0]:indices[2]] = 0
exp.loc[indices[0]:indices[2]] = 0
assert_series_equal(cp, exp)
# integer indexes, be careful
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
inds = [0, 4, 6]
arr_inds = np.array([0, 4, 6])
cp = s.copy()
exp = s.copy()
s[inds] = 0
s.loc[inds] = 0
assert_series_equal(cp, exp)
cp = s.copy()
exp = s.copy()
s[arr_inds] = 0
s.loc[arr_inds] = 0
assert_series_equal(cp, exp)
inds_notfound = [0, 4, 5, 6]
arr_inds_notfound = np.array([0, 4, 5, 6])
pytest.raises(Exception, s.__setitem__, inds_notfound, 0)
pytest.raises(Exception, s.__setitem__, arr_inds_notfound, 0)
# GH12089
# with tz for values
s = Series(pd.date_range("2011-01-01", periods=3, tz="US/Eastern"),
index=['a', 'b', 'c'])
s2 = s.copy()
expected = Timestamp('2011-01-03', tz='US/Eastern')
s2.loc['a'] = expected
result = s2.loc['a']
assert result == expected
s2 = s.copy()
s2.iloc[0] = expected
result = s2.iloc[0]
assert result == expected
s2 = s.copy()
s2['a'] = expected
result = s2['a']
assert result == expected
def test_loc_getitem(self):
inds = self.series.index[[3, 4, 7]]
assert_series_equal(self.series.loc[inds], self.series.reindex(inds))
assert_series_equal(self.series.iloc[5::2], self.series[5::2])
# slice with indices
d1, d2 = self.ts.index[[5, 15]]
result = self.ts.loc[d1:d2]
expected = self.ts.truncate(d1, d2)
assert_series_equal(result, expected)
# boolean
mask = self.series > self.series.median()
assert_series_equal(self.series.loc[mask], self.series[mask])
# ask for index value
assert self.ts.loc[d1] == self.ts[d1]
assert self.ts.loc[d2] == self.ts[d2]
def test_loc_getitem_not_monotonic(self):
d1, d2 = self.ts.index[[5, 15]]
ts2 = self.ts[::2][[1, 2, 0]]
pytest.raises(KeyError, ts2.loc.__getitem__, slice(d1, d2))
pytest.raises(KeyError, ts2.loc.__setitem__, slice(d1, d2), 0)
def test_loc_getitem_setitem_integer_slice_keyerrors(self):
s = Series(np.random.randn(10), index=lrange(0, 20, 2))
# this is OK
cp = s.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).all()
# so is this
cp = s.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = s.iloc[2:6]
result2 = s.loc[3:11]
expected = s.reindex([4, 6, 8, 10])
assert_series_equal(result, expected)
assert_series_equal(result2, expected)
# non-monotonic, raise KeyError
s2 = s.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, s2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, s2.loc.__setitem__, slice(3, 11), 0)
def test_loc_getitem_iterator(self):
idx = iter(self.series.index[:10])
result = self.series.loc[idx]
assert_series_equal(result, self.series[:10])
def test_setitem_with_tz(self):
for tz in ['US/Eastern', 'UTC', 'Asia/Tokyo']:
orig = pd.Series(pd.date_range('2016-01-01', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2016-01-01 02:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-01-01 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_setitem_with_tz_dst(self):
# GH XXX
tz = 'US/Eastern'
orig = pd.Series(pd.date_range('2016-11-06', freq='H', periods=3,
tz=tz))
assert orig.dtype == 'datetime64[ns, {0}]'.format(tz)
# scalar
s = orig.copy()
s[1] = pd.Timestamp('2011-01-01', tz=tz)
exp = pd.Series([pd.Timestamp('2016-11-06 00:00-04:00', tz=tz),
pd.Timestamp('2011-01-01 00:00-05:00', tz=tz),
pd.Timestamp('2016-11-06 01:00-05:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[1] = pd.Timestamp('2011-01-01', tz=tz)
tm.assert_series_equal(s, exp)
# vector
vals = pd.Series([pd.Timestamp('2011-01-01', tz=tz),
pd.Timestamp('2012-01-01', tz=tz)], index=[1, 2])
assert vals.dtype == 'datetime64[ns, {0}]'.format(tz)
s[[1, 2]] = vals
exp = pd.Series([pd.Timestamp('2016-11-06 00:00', tz=tz),
pd.Timestamp('2011-01-01 00:00', tz=tz),
pd.Timestamp('2012-01-01 00:00', tz=tz)])
tm.assert_series_equal(s, exp)
s = orig.copy()
s.loc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
s = orig.copy()
s.iloc[[1, 2]] = vals
tm.assert_series_equal(s, exp)
def test_where(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(cond).dropna()
rs2 = s[cond]
assert_series_equal(rs, rs2)
rs = s.where(cond, -s)
assert_series_equal(rs, s.abs())
rs = s.where(cond)
assert (s.shape == rs.shape)
assert (rs is not s)
# test alignment
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
expected = s2[cond].reindex(s2.index[:3]).reindex(s2.index)
rs = s2.where(cond[:3])
assert_series_equal(rs, expected)
expected = s2.abs()
expected.iloc[0] = s2[0]
rs = s2.where(cond[:3], -s2)
assert_series_equal(rs, expected)
pytest.raises(ValueError, s.where, 1)
pytest.raises(ValueError, s.where, cond[:3].values, -s)
# GH 2745
s = Series([1, 2])
s[[True, False]] = [0, 1]
expected = Series([0, 2])
assert_series_equal(s, expected)
# failures
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[0, 2, 3])
pytest.raises(ValueError, s.__setitem__, tuple([[[True, False]]]),
[])
# unsafe dtype changes
for dtype in [np.int8, np.int16, np.int32, np.int64, np.float16,
np.float32, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype=dtype)
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# these are allowed operations, but are upcasted
for dtype in [np.int64, np.float64]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
s[mask] = values
expected = Series(values + lrange(5, 10), dtype='float64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
# GH 9731
s = Series(np.arange(10), dtype='int64')
mask = s > 5
values = [2.5, 3.5, 4.5, 5.5]
s[mask] = values
expected = Series(lrange(6) + values, dtype='float64')
assert_series_equal(s, expected)
# can't do these as we are forced to change the itemsize of the input
# to something we cannot
for dtype in [np.int8, np.int16, np.int32, np.float16, np.float32]:
s = Series(np.arange(10), dtype=dtype)
mask = s < 5
values = [2.5, 3.5, 4.5, 5.5, 6.5]
pytest.raises(Exception, s.__setitem__, tuple(mask), values)
# GH3235
s = Series(np.arange(10), dtype='int64')
mask = s < 5
s[mask] = lrange(2, 7)
expected = Series(lrange(2, 7) + lrange(5, 10), dtype='int64')
assert_series_equal(s, expected)
assert s.dtype == expected.dtype
s = Series(np.arange(10), dtype='int64')
mask = s > 5
s[mask] = [0] * 4
expected = Series([0, 1, 2, 3, 4, 5] + [0] * 4, dtype='int64')
assert_series_equal(s, expected)
s = Series(np.arange(10))
mask = s > 5
def f():
s[mask] = [5, 4, 3, 2, 1]
pytest.raises(ValueError, f)
def f():
s[mask] = [0] * 5
pytest.raises(ValueError, f)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.where(s > 2, np.nan)
expected = Series([np.nan, np.nan, 3, 4])
assert_series_equal(result, expected)
# GH 4667
# setting with None changes dtype
s = Series(range(10)).astype(float)
s[8] = None
result = s[8]
assert isnull(result)
s = Series(range(10)).astype(float)
s[s > 8] = None
result = s[isnull(s)]
expected = Series(np.nan, index=[9])
assert_series_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
s = Series([1, 2, 3])
cond = [False, True, True]
expected = Series([np.nan, 2, 3])
klasses = [list, tuple, np.array, Series]
for klass in klasses:
result = s.where(klass(cond))
assert_series_equal(result, expected)
def test_where_invalid_input(self):
# see gh-15414: only boolean arrays accepted
s = Series([1, 2, 3])
msg = "Boolean array expected for the condition"
conds = [
[1, 0, 1],
Series([2, 5, 7]),
["True", "False", "True"],
[Timestamp("2017-01-01"),
pd.NaT, Timestamp("2017-01-02")]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
msg = "Array conditional must be same shape as self"
with tm.assert_raises_regex(ValueError, msg):
s.where([True])
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
s = Series([1, 2, 3])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([1, np.nan, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
s.where(cond)
expected = Series([np.nan, 2, np.nan])
out = s.where(Series(cond))
tm.assert_series_equal(out, expected)
def test_where_setitem_invalid(self):
# GH 2702
# make sure correct exceptions are raised on invalid list assignment
# slice
s = Series(list('abc'))
def f():
s[0:3] = list(range(27))
pytest.raises(ValueError, f)
s[0:3] = list(range(3))
expected = Series([0, 1, 2])
assert_series_equal(s.astype(np.int64), expected, )
# slice with step
s = Series(list('abcdef'))
def f():
s[0:4:2] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abcdef'))
s[0:4:2] = list(range(2))
expected = Series([0, 'b', 1, 'd', 'e', 'f'])
assert_series_equal(s, expected)
# neg slices
s = Series(list('abcdef'))
def f():
s[:-1] = list(range(27))
pytest.raises(ValueError, f)
s[-3:-1] = list(range(2))
expected = Series(['a', 'b', 'c', 0, 1, 'f'])
assert_series_equal(s, expected)
# list
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(27))
pytest.raises(ValueError, f)
s = Series(list('abc'))
def f():
s[[0, 1, 2]] = list(range(2))
pytest.raises(ValueError, f)
# scalar
s = Series(list('abc'))
s[0] = list(range(10))
expected = Series([list(range(10)), 'b', 'c'])
assert_series_equal(s, expected)
def test_where_broadcast(self):
# Test a variety of differently sized series
for size in range(2, 6):
# Test a variety of boolean indices
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
# Test a variety of different numbers as content
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
# Test numpy arrays, lists and tuples as the input to be
# broadcast
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
s[selection] = arr
# Construct the expected series by taking the source
# data or item based on the selection
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(s, expected)
s = Series(data)
result = s.where(~selection, arr)
assert_series_equal(result, expected)
def test_where_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.where(cond, inplace=True)
assert_series_equal(rs.dropna(), s[cond])
assert_series_equal(rs, s.where(cond))
rs = s.copy()
rs.where(cond, -s, inplace=True)
assert_series_equal(rs, s.where(cond, -s))
def test_where_dups(self):
# GH 4550
# where crashes with dups in index
s1 = Series(list(range(3)))
s2 = Series(list(range(3)))
comb = pd.concat([s1, s2])
result = comb.where(comb < 2)
expected = Series([0, 1, np.nan, 0, 1, np.nan],
index=[0, 1, 2, 0, 1, 2])
assert_series_equal(result, expected)
# GH 4548
# inplace updating not working with dups
comb[comb < 1] = 5
expected = Series([5, 1, 2, 5, 1, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
comb[comb < 2] += 10
expected = Series([5, 11, 2, 5, 11, 2], index=[0, 1, 2, 0, 1, 2])
assert_series_equal(comb, expected)
def test_where_datetime(self):
s = Series(date_range('20130102', periods=2))
expected = Series([10, 10], dtype='datetime64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='datetime64[ns]')
assert_series_equal(rs, expected)
# GH 15701
timestamps = ['2016-12-31 12:00:04+00:00',
'2016-12-31 12:00:04.010000+00:00']
s = Series([pd.Timestamp(t) for t in timestamps])
rs = s.where(Series([False, True]))
expected = Series([pd.NaT, s[1]])
assert_series_equal(rs, expected)
def test_where_timedelta(self):
s = Series([1, 2], dtype='timedelta64[ns]')
expected = Series([10, 10], dtype='timedelta64[ns]')
mask = np.array([False, False])
rs = s.where(mask, [10, 10])
assert_series_equal(rs, expected)
rs = s.where(mask, 10)
assert_series_equal(rs, expected)
rs = s.where(mask, 10.0)
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, 10.0])
assert_series_equal(rs, expected)
rs = s.where(mask, [10.0, np.nan])
expected = Series([10, None], dtype='timedelta64[ns]')
assert_series_equal(rs, expected)
def test_mask(self):
# compare with tested results in test_where
s = Series(np.random.randn(5))
cond = s > 0
rs = s.where(~cond, np.nan)
assert_series_equal(rs, s.mask(cond))
rs = s.where(~cond)
rs2 = s.mask(cond)
assert_series_equal(rs, rs2)
rs = s.where(~cond, -s)
rs2 = s.mask(cond, -s)
assert_series_equal(rs, rs2)
cond = Series([True, False, False, True, False], index=s.index)
s2 = -(s.abs())
rs = s2.where(~cond[:3])
rs2 = s2.mask(cond[:3])
assert_series_equal(rs, rs2)
rs = s2.where(~cond[:3], -s2)
rs2 = s2.mask(cond[:3], -s2)
assert_series_equal(rs, rs2)
pytest.raises(ValueError, s.mask, 1)
pytest.raises(ValueError, s.mask, cond[:3].values, -s)
# dtype changes
s = Series([1, 2, 3, 4])
result = s.mask(s > 2, np.nan)
expected = Series([1, 2, np.nan, np.nan])
assert_series_equal(result, expected)
def test_mask_broadcast(self):
# GH 8801
# copied from test_where_broadcast
for size in range(2, 6):
for selection in [
# First element should be set
np.resize([True, False, False, False, False], size),
# Set alternating elements
np.resize([True, False], size),
# No element should be set
np.resize([False], size)]:
for item in [2.0, np.nan, np.finfo(np.float).max,
np.finfo(np.float).min]:
for arr in [np.array([item]), [item], (item, )]:
data = np.arange(size, dtype=float)
s = Series(data)
result = s.mask(selection, arr)
expected = Series([item if use_item else data[
i] for i, use_item in enumerate(selection)])
assert_series_equal(result, expected)
def test_mask_inplace(self):
s = Series(np.random.randn(5))
cond = s > 0
rs = s.copy()
rs.mask(cond, inplace=True)
assert_series_equal(rs.dropna(), s[~cond])
assert_series_equal(rs, s.mask(cond))
rs = s.copy()
rs.mask(cond, -s, inplace=True)
assert_series_equal(rs, s.mask(cond, -s))
def test_ix_setitem(self):
inds = self.series.index[[3, 4, 7]]
result = self.series.copy()
result.loc[inds] = 5
expected = self.series.copy()
expected[[3, 4, 7]] = 5
assert_series_equal(result, expected)
result.iloc[5:10] = 10
expected[5:10] = 10
assert_series_equal(result, expected)
# set slice with indices
d1, d2 = self.series.index[[5, 15]]
result.loc[d1:d2] = 6
expected[5:16] = 6 # because it's inclusive
assert_series_equal(result, expected)
# set index value
self.series.loc[d1] = 4
self.series.loc[d2] = 6
assert self.series[d1] == 4
assert self.series[d2] == 6
def test_where_numeric_with_string(self):
# GH 9280
s = pd.Series([1, 2, 3])
w = s.where(s > 1, 'X')
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, ['X', 'Y', 'Z'])
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
w = s.where(s > 1, np.array(['X', 'Y', 'Z']))
assert not is_integer(w[0])
assert is_integer(w[1])
assert is_integer(w[2])
assert isinstance(w[0], str)
assert w.dtype == 'object'
def test_setitem_boolean(self):
mask = self.series > self.series.median()
# similarly indexed series
result = self.series.copy()
result[mask] = self.series * 2
expected = self.series * 2
assert_series_equal(result[mask], expected[mask])
# needs alignment
result = self.series.copy()
result[mask] = (self.series * 2)[0:5]
expected = (self.series * 2)[0:5].reindex_like(self.series)
expected[-mask] = self.series[mask]
assert_series_equal(result[mask], expected[mask])
def test_ix_setitem_boolean(self):
mask = self.series > self.series.median()
result = self.series.copy()
result.loc[mask] = 0
expected = self.series
expected[mask] = 0
assert_series_equal(result, expected)
def test_ix_setitem_corner(self):
inds = list(self.series.index[[5, 8, 12]])
self.series.loc[inds] = 5
pytest.raises(Exception, self.series.loc.__setitem__,
inds + ['foo'], 5)
def test_get_set_boolean_different_order(self):
ordered = self.series.sort_values()
# setting
copy = self.series.copy()
copy[ordered > 0] = 0
expected = self.series.copy()
expected[expected > 0] = 0
assert_series_equal(copy, expected)
# getting
sel = self.series[ordered > 0]
exp = self.series[self.series > 0]
assert_series_equal(sel, exp)
def test_setitem_na(self):
# these induce dtype changes
expected = Series([np.nan, 3, np.nan, 5, np.nan, 7, np.nan, 9, np.nan])
s = Series([2, 3, 4, 5, 6, 7, 8, 9, 10])
s[::2] = np.nan
assert_series_equal(s, expected)
# gets coerced to float
expected = Series([np.nan, 1, np.nan, 0])
s = Series([True, True, False, False])
s[::2] = np.nan
assert_series_equal(s, expected)
expected = Series([np.nan, np.nan, np.nan, np.nan, np.nan, 5, 6, 7, 8,
9])
s = Series(np.arange(10))
s[:5] = np.nan
assert_series_equal(s, expected)
def test_basic_indexing(self):
s = Series(np.random.randn(5), index=['a', 'b', 'a', 'a', 'b'])
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
pytest.raises(KeyError, s.__getitem__, 'c')
s = s.sort_index()
pytest.raises(IndexError, s.__getitem__, 5)
pytest.raises(IndexError, s.__setitem__, 5, 0)
def test_int_indexing(self):
s = Series(np.random.randn(6), index=[0, 0, 1, 1, 2, 2])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
# not monotonic
s = Series(np.random.randn(6), index=[2, 2, 0, 0, 1, 1])
pytest.raises(KeyError, s.__getitem__, 5)
pytest.raises(KeyError, s.__getitem__, 'c')
def test_datetime_indexing(self):
from pandas import date_range
index = date_range('1/1/2000', '1/7/2000')
index = index.repeat(3)
s = Series(len(index), index=index)
stamp = Timestamp('1/8/2000')
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
# not monotonic
s = Series(len(index), index=index)
s = s[::-1]
pytest.raises(KeyError, s.__getitem__, stamp)
s[stamp] = 0
assert s[stamp] == 0
def test_timedelta_assignment(self):
# GH 8209
s = Series([])
s.loc['B'] = timedelta(1)
tm.assert_series_equal(s, Series(Timedelta('1 days'), index=['B']))
s = s.reindex(s.index.insert(0, 'A'))
tm.assert_series_equal(s, Series(
[np.nan, Timedelta('1 days')], index=['A', 'B']))
result = s.fillna(timedelta(1))
expected = Series(Timedelta('1 days'), index=['A', 'B'])
tm.assert_series_equal(result, expected)
s.loc['A'] = timedelta(1)
tm.assert_series_equal(s, expected)
# GH 14155
s = Series(10 * [np.timedelta64(10, 'm')])
s.loc[[1, 2, 3]] = np.timedelta64(20, 'm')
expected = pd.Series(10 * [np.timedelta64(10, 'm')])
expected.loc[[1, 2, 3]] = pd.Timedelta(np.timedelta64(20, 'm'))
tm.assert_series_equal(s, expected)
def test_underlying_data_conversion(self):
# GH 4080
df = DataFrame(dict((c, [1, 2, 3]) for c in ['a', 'b', 'c']))
df.set_index(['a', 'b', 'c'], inplace=True)
s = Series([1], index=[(2, 2, 2)])
df['val'] = 0
df
df['val'].update(s)
expected = DataFrame(
dict(a=[1, 2, 3], b=[1, 2, 3], c=[1, 2, 3], val=[0, 1, 0]))
expected.set_index(['a', 'b', 'c'], inplace=True)
tm.assert_frame_equal(df, expected)
# GH 3970
# these are chained assignments as well
pd.set_option('chained_assignment', None)
df = DataFrame({"aa": range(5), "bb": [2.2] * 5})
df["cc"] = 0.0
ck = [True] * len(df)
df["bb"].iloc[0] = .13
# TODO: unused
df_tmp = df.iloc[ck] # noqa
df["bb"].iloc[0] = .15
assert df['bb'].iloc[0] == 0.15
pd.set_option('chained_assignment', 'raise')
# GH 3217
df = DataFrame(dict(a=[1, 3], b=[np.nan, 2]))
df['c'] = np.nan
df['c'].update(pd.Series(['foo'], index=[0]))
expected = DataFrame(dict(a=[1, 3], b=[np.nan, 2], c=['foo', np.nan]))
tm.assert_frame_equal(df, expected)
def test_preserveRefs(self):
seq = self.ts[[5, 10, 15]]
seq[1] = np.NaN
assert not np.isnan(self.ts[10])
def test_drop(self):
# unique
s = Series([1, 2], index=['one', 'two'])
expected = Series([1], index=['one'])
result = s.drop(['two'])
assert_series_equal(result, expected)
result = s.drop('two', axis='rows')
assert_series_equal(result, expected)
# non-unique
# GH 5248
s = Series([1, 1, 2], index=['one', 'two', 'one'])
expected = Series([1, 2], index=['one', 'one'])
result = s.drop(['two'], axis=0)
assert_series_equal(result, expected)
result = s.drop('two')
assert_series_equal(result, expected)
expected = Series([1], index=['two'])
result = s.drop(['one'])
assert_series_equal(result, expected)
result = s.drop('one')
assert_series_equal(result, expected)
# single string/tuple-like
s = Series(range(3), index=list('abc'))
pytest.raises(ValueError, s.drop, 'bc')
pytest.raises(ValueError, s.drop, ('a', ))
# errors='ignore'
s = Series(range(3), index=list('abc'))
result = s.drop('bc', errors='ignore')
assert_series_equal(result, s)
result = s.drop(['a', 'd'], errors='ignore')
expected = s.iloc[1:]
assert_series_equal(result, expected)
# bad axis
pytest.raises(ValueError, s.drop, 'one', axis='columns')
# GH 8522
s = Series([2, 3], index=[True, False])
assert s.index.is_object()
result = s.drop(True)
expected = Series([3], index=[False])
assert_series_equal(result, expected)
def test_align(self):
def _check_align(a, b, how='left', fill=None):
aa, ab = a.align(b, join=how, fill_value=fill)
join_index = a.index.join(b.index, how=how)
if fill is not None:
diff_a = aa.index.difference(join_index)
diff_b = ab.index.difference(join_index)
if len(diff_a) > 0:
assert (aa.reindex(diff_a) == fill).all()
if len(diff_b) > 0:
assert (ab.reindex(diff_b) == fill).all()
ea = a.reindex(join_index)
eb = b.reindex(join_index)
if fill is not None:
ea = ea.fillna(fill)
eb = eb.fillna(fill)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
assert aa.name == 'ts'
assert ea.name == 'ts'
assert ab.name == 'ts'
assert eb.name == 'ts'
for kind in JOIN_TYPES:
_check_align(self.ts[2:], self.ts[:-5], how=kind)
_check_align(self.ts[2:], self.ts[:-5], how=kind, fill=-1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind)
_check_align(self.ts[:0], self.ts[:-5], how=kind, fill=-1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind)
_check_align(self.ts[:-5], self.ts[:0], how=kind, fill=-1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind)
_check_align(self.ts[:0], self.ts[:0], how=kind, fill=-1)
def test_align_fill_method(self):
def _check_align(a, b, how='left', method='pad', limit=None):
aa, ab = a.align(b, join=how, method=method, limit=limit)
join_index = a.index.join(b.index, how=how)
ea = a.reindex(join_index)
eb = b.reindex(join_index)
ea = ea.fillna(method=method, limit=limit)
eb = eb.fillna(method=method, limit=limit)
assert_series_equal(aa, ea)
assert_series_equal(ab, eb)
for kind in JOIN_TYPES:
for meth in ['pad', 'bfill']:
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[2:], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty left
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:-5], how=kind, method=meth,
limit=1)
# empty right
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:-5], self.ts[:0], how=kind, method=meth,
limit=1)
# both empty
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth)
_check_align(self.ts[:0], self.ts[:0], how=kind, method=meth,
limit=1)
def test_align_nocopy(self):
b = self.ts[:5].copy()
# do copy
a = self.ts.copy()
ra, _ = a.align(b, join='left')
ra[:5] = 5
assert not (a[:5] == 5).any()
# do not copy
a = self.ts.copy()
ra, _ = a.align(b, join='left', copy=False)
ra[:5] = 5
assert (a[:5] == 5).all()
# do copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right')
rb[:3] = 5
assert not (b[:3] == 5).any()
# do not copy
a = self.ts.copy()
b = self.ts[:5].copy()
_, rb = a.align(b, join='right', copy=False)
rb[:2] = 5
assert (b[:2] == 5).all()
def test_align_same_index(self):
a, b = self.ts.align(self.ts, copy=False)
assert a.index is self.ts.index
assert b.index is self.ts.index
a, b = self.ts.align(self.ts, copy=True)
assert a.index is not self.ts.index
assert b.index is not self.ts.index
def test_align_multiindex(self):
# GH 10665
midx = pd.MultiIndex.from_product([range(2), range(3), range(2)],
names=('a', 'b', 'c'))
idx = pd.Index(range(2), name='b')
s1 = pd.Series(np.arange(12, dtype='int64'), index=midx)
s2 = pd.Series(np.arange(2, dtype='int64'), index=idx)
# these must be the same results (but flipped)
res1l, res1r = s1.align(s2, join='left')
res2l, res2r = s2.align(s1, join='right')
expl = s1
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1, np.nan, np.nan] * 2, index=midx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
res1l, res1r = s1.align(s2, join='right')
res2l, res2r = s2.align(s1, join='left')
exp_idx = pd.MultiIndex.from_product([range(2), range(2), range(2)],
names=('a', 'b', 'c'))
expl = pd.Series([0, 1, 2, 3, 6, 7, 8, 9], index=exp_idx)
tm.assert_series_equal(expl, res1l)
tm.assert_series_equal(expl, res2r)
expr = pd.Series([0, 0, 1, 1] * 2, index=exp_idx)
tm.assert_series_equal(expr, res1r)
tm.assert_series_equal(expr, res2l)
def test_reindex(self):
identity = self.series.reindex(self.series.index)
# __array_interface__ is not defined for older numpies
# and on some pythons
try:
assert np.may_share_memory(self.series.index, identity.index)
except AttributeError:
pass
assert identity.index.is_(self.series.index)
assert identity.index.identical(self.series.index)
subIndex = self.series.index[10:20]
subSeries = self.series.reindex(subIndex)
for idx, val in compat.iteritems(subSeries):
assert val == self.series[idx]
subIndex2 = self.ts.index[10:20]
subTS = self.ts.reindex(subIndex2)
for idx, val in compat.iteritems(subTS):
assert val == self.ts[idx]
stuffSeries = self.ts.reindex(subIndex)
assert np.isnan(stuffSeries).all()
# important: the non-contiguous index must not trip up the Cython indexing engine
nonContigIndex = self.ts.index[::2]
subNonContig = self.ts.reindex(nonContigIndex)
for idx, val in compat.iteritems(subNonContig):
assert val == self.ts[idx]
# return a copy the same index here
result = self.ts.reindex()
assert not (result is self.ts)
def test_reindex_nan(self):
ts = Series([2, 3, 5, 7], index=[1, 4, nan, 8])
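# j holds the positional locations in ts of the labels listed in i (each nan label resolves to position 2)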
i, j = [nan, 1, nan, 8, 4, nan], [2, 0, 2, 3, 1, 2]
assert_series_equal(ts.reindex(i), ts.iloc[j])
ts.index = ts.index.astype('object')
# reindex coerces index.dtype to float, loc/iloc doesn't
assert_series_equal(ts.reindex(i), ts.iloc[j], check_index_type=False)
def test_reindex_series_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
series = Series(rng)
result = series.reindex(lrange(15))
assert np.issubdtype(result.dtype, np.dtype('M8[ns]'))
mask = result.isnull()
assert mask[-5:].all()
assert not mask[:-5].any()
def test_reindex_with_datetimes(self):
rng = date_range('1/1/2000', periods=20)
ts = Series(np.random.randn(20), index=rng)
result = ts.reindex(list(ts.index[5:10]))
expected = ts[5:10]
tm.assert_series_equal(result, expected)
result = ts[list(ts.index[5:10])]
tm.assert_series_equal(result, expected)
def test_reindex_corner(self):
# corner case that was previously broken; now believed fixed
self.empty.reindex(self.ts.index, method='pad') # it works
# corner case: pad empty series
reindexed = self.empty.reindex(self.ts.index, method='pad')
# pass non-Index
reindexed = self.ts.reindex(list(self.ts.index))
assert_series_equal(self.ts, reindexed)
# bad fill method
ts = self.ts[::2]
pytest.raises(Exception, ts.reindex, self.ts.index, method='foo')
def test_reindex_pad(self):
s = Series(np.arange(10), dtype='int64')
s2 = s[::2]
reindexed = s2.reindex(s.index, method='pad')
reindexed2 = s2.reindex(s.index, method='ffill')
assert_series_equal(reindexed, reindexed2)
expected = Series([0, 0, 2, 2, 4, 4, 6, 6, 8, 8], index=np.arange(10))
assert_series_equal(reindexed, expected)
# GH4604
s = Series([1, 2, 3, 4, 5], index=['a', 'b', 'c', 'd', 'e'])
new_index = ['a', 'g', 'c', 'f']
expected = Series([1, 1, 3, 3], index=new_index)
# this changes dtype because the ffill happens after
result = s.reindex(new_index).ffill()
assert_series_equal(result, expected.astype('float64'))
result = s.reindex(new_index).ffill(downcast='infer')
assert_series_equal(result, expected)
expected = Series([1, 5, 3, 5], index=new_index)
result = s.reindex(new_index, method='ffill')
assert_series_equal(result, expected)
# inference of new dtype
s = Series([True, False, False, True], index=list('abcd'))
new_index = 'agc'
result = s.reindex(list(new_index)).ffill()
expected = Series([True, True, False], index=list(new_index))
assert_series_equal(result, expected)
# GH4618 shifted series downcasting
s = Series(False, index=lrange(0, 5))
result = s.shift(1).fillna(method='bfill')
expected = Series(False, index=lrange(0, 5))
assert_series_equal(result, expected)
def test_reindex_nearest(self):
s = Series(np.arange(10, dtype='int64'))
target = [0.1, 0.9, 1.5, 2.0]
actual = s.reindex(target, method='nearest')
expected = Series(np.around(target).astype('int64'), target)
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest')
assert_series_equal(expected, actual)
actual = s.reindex_like(actual, method='nearest', tolerance=1)
assert_series_equal(expected, actual)
actual = s.reindex(target, method='nearest', tolerance=0.2)
expected = Series([0, 1, np.nan, 2], target)
assert_series_equal(expected, actual)
def test_reindex_backfill(self):
pass
def test_reindex_int(self):
ts = self.ts[::2]
int_ts = Series(np.zeros(len(ts), dtype=int), index=ts.index)
# this should work fine
reindexed_int = int_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_int.dtype == np.float_
# NO NaNs introduced
reindexed_int = int_ts.reindex(int_ts.index[::2])
assert reindexed_int.dtype == np.int_
def test_reindex_bool(self):
# A series other than float, int, string, or object
ts = self.ts[::2]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
# this should work fine
reindexed_bool = bool_ts.reindex(self.ts.index)
# if NaNs introduced
assert reindexed_bool.dtype == np.object_
# NO NaNs introduced
reindexed_bool = bool_ts.reindex(bool_ts.index[::2])
assert reindexed_bool.dtype == np.bool_
def test_reindex_bool_pad(self):
# fail
ts = self.ts[5:]
bool_ts = Series(np.zeros(len(ts), dtype=bool), index=ts.index)
filled_bool = bool_ts.reindex(self.ts.index, method='pad')
assert isnull(filled_bool[:5]).all()
def test_reindex_like(self):
other = self.ts[::2]
assert_series_equal(self.ts.reindex(other.index),
self.ts.reindex_like(other))
# GH 7179
day1 = datetime(2013, 3, 5)
day2 = datetime(2013, 5, 5)
day3 = datetime(2014, 3, 5)
series1 = Series([5, None, None], [day1, day2, day3])
series2 = Series([None, None], [day1, day3])
result = series1.reindex_like(series2, method='pad')
expected = Series([5, np.nan], index=[day1, day3])
assert_series_equal(result, expected)
def test_reindex_fill_value(self):
# -----------------------------------------------------------
# floats
floats = Series([1., 2., 3.])
result = floats.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
result = floats.reindex([1, 2, 3], fill_value=0)
expected = Series([2., 3., 0], index=[1, 2, 3])
assert_series_equal(result, expected)
# -----------------------------------------------------------
# ints
ints = Series([1, 2, 3])
result = ints.reindex([1, 2, 3])
expected = Series([2., 3., np.nan], index=[1, 2, 3])
assert_series_equal(result, expected)
# don't upcast
result = ints.reindex([1, 2, 3], fill_value=0)
expected = Series([2, 3, 0], index=[1, 2, 3])
assert issubclass(result.dtype.type, np.integer)
assert_series_equal(result, expected)
# -----------------------------------------------------------
# objects
objects = Series([1, 2, 3], dtype=object)
result = objects.reindex([1, 2, 3])
expected = Series([2, 3, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = objects.reindex([1, 2, 3], fill_value='foo')
expected = Series([2, 3, 'foo'], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
# ------------------------------------------------------------
# bools
bools = Series([True, False, True])
result = bools.reindex([1, 2, 3])
expected = Series([False, True, np.nan], index=[1, 2, 3], dtype=object)
assert_series_equal(result, expected)
result = bools.reindex([1, 2, 3], fill_value=False)
expected = Series([False, True, False], index=[1, 2, 3])
assert_series_equal(result, expected)
def test_select(self):
n = len(self.ts)
result = self.ts.select(lambda x: x >= self.ts.index[n // 2])
expected = self.ts.reindex(self.ts.index[n // 2:])
assert_series_equal(result, expected)
result = self.ts.select(lambda x: x.weekday() == 2)
expected = self.ts[self.ts.index.weekday == 2]
assert_series_equal(result, expected)
def test_cast_on_putmask(self):
# GH 2746
# need to upcast
s = Series([1, 2], index=[1, 2], dtype='int64')
s[[True, False]] = Series([0], index=[1], dtype='int64')
expected = Series([0, 2], index=[1, 2], dtype='int64')
assert_series_equal(s, expected)
def test_type_promote_putmask(self):
# GH8387: test that changing types does not break alignment
ts = Series(np.random.randn(100), index=np.arange(100, 0, -1)).round(5)
left, mask = ts.copy(), ts > 0
right = ts[mask].copy().map(str)
left[mask] = right
assert_series_equal(left, ts.map(lambda t: str(t) if t > 0 else t))
s = Series([0, 1, 2, 0])
mask = s > 0
s2 = s[mask].map(str)
s[mask] = s2
assert_series_equal(s, Series([0, '1', '2', 0]))
s = Series([0, 'foo', 'bar', 0])
mask = Series([False, True, True, False])
s2 = s[mask]
s[mask] = s2
assert_series_equal(s, Series([0, 'foo', 'bar', 0]))
def test_head_tail(self):
assert_series_equal(self.series.head(), self.series[:5])
assert_series_equal(self.series.head(0), self.series[0:0])
assert_series_equal(self.series.tail(), self.series[-5:])
assert_series_equal(self.series.tail(0), self.series[0:0])
def test_multilevel_preserve_name(self):
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
s = Series(np.random.randn(len(index)), index=index, name='sth')
result = s['foo']
result2 = s.loc['foo']
assert result.name == s.name
assert result2.name == s.name
def test_setitem_scalar_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
for n in range(len(series)):
with pytest.raises(ValueError):
series[n] = 1
assert array[n] == 0
def test_setitem_slice_into_readonly_backing_data(self):
# GH14359: test that you cannot mutate a read only buffer
array = np.zeros(5)
array.flags.writeable = False # make the array immutable
series = Series(array)
with pytest.raises(ValueError):
series[1:3] = 1
assert not array.any()
class TestTimeSeriesDuplicates(object):
def setup_method(self, method):
dates = [datetime(2000, 1, 2), datetime(2000, 1, 2),
datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 3), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 4),
datetime(2000, 1, 4), datetime(2000, 1, 5)]
self.dups = Series(np.random.randn(len(dates)), index=dates)
def test_constructor(self):
assert isinstance(self.dups, Series)
assert isinstance(self.dups.index, DatetimeIndex)
def test_is_unique_monotonic(self):
assert not self.dups.index.is_unique
def test_index_unique(self):
uniques = self.dups.index.unique()
expected = DatetimeIndex([datetime(2000, 1, 2), datetime(2000, 1, 3),
datetime(2000, 1, 4), datetime(2000, 1, 5)])
assert uniques.dtype == 'M8[ns]' # sanity
tm.assert_index_equal(uniques, expected)
assert self.dups.index.nunique() == 4
# #2563
assert isinstance(uniques, DatetimeIndex)
dups_local = self.dups.index.tz_localize('US/Eastern')
dups_local.name = 'foo'
result = dups_local.unique()
expected = DatetimeIndex(expected, name='foo')
expected = expected.tz_localize('US/Eastern')
assert result.tz is not None
assert result.name == 'foo'
tm.assert_index_equal(result, expected)
# NaT, note this is excluded
arr = [1370745748 + t for t in range(20)] + [tslib.iNaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
arr = [Timestamp('2013-06-09 02:42:28') + timedelta(seconds=t)
for t in range(20)] + [NaT]
idx = DatetimeIndex(arr * 3)
tm.assert_index_equal(idx.unique(), DatetimeIndex(arr))
assert idx.nunique() == 20
assert idx.nunique(dropna=False) == 21
def test_index_dupes_contains(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
assert d in ix
def test_duplicate_dates_indexing(self):
ts = self.dups
uniques = ts.index.unique()
for date in uniques:
result = ts[date]
mask = ts.index == date
total = (ts.index == date).sum()
expected = ts[mask]
if total > 1:
assert_series_equal(result, expected)
else:
assert_almost_equal(result, expected[0])
cp = ts.copy()
cp[date] = 0
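# assigning through the duplicated date zeroes every row sharing that timestamp; np.where rebuilds the same result positionally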
expected = Series(np.where(mask, 0, ts), index=ts.index)
assert_series_equal(cp, expected)
pytest.raises(KeyError, ts.__getitem__, datetime(2000, 1, 6))
# new index
ts[datetime(2000, 1, 6)] = 0
assert ts[datetime(2000, 1, 6)] == 0
def test_range_slice(self):
idx = DatetimeIndex(['1/1/2000', '1/2/2000', '1/2/2000', '1/3/2000',
'1/4/2000'])
ts = Series(np.random.randn(len(idx)), index=idx)
result = ts['1/2/2000':]
expected = ts[1:]
assert_series_equal(result, expected)
result = ts['1/2/2000':'1/3/2000']
expected = ts[1:4]
assert_series_equal(result, expected)
def test_groupby_average_dup_values(self):
result = self.dups.groupby(level=0).mean()
expected = self.dups.groupby(self.dups.index).mean()
assert_series_equal(result, expected)
def test_indexing_over_size_cutoff(self):
import datetime
# #1821
old_cutoff = _index._SIZE_CUTOFF
try:
_index._SIZE_CUTOFF = 1000
# create large list of non periodic datetime
dates = []
sec = datetime.timedelta(seconds=1)
half_sec = datetime.timedelta(microseconds=500000)
d = datetime.datetime(2011, 12, 5, 20, 30)
n = 1100
for i in range(n):
dates.append(d)
dates.append(d + sec)
dates.append(d + sec + half_sec)
dates.append(d + sec + sec + half_sec)
d += 3 * sec
# duplicate some values in the list
duplicate_positions = np.random.randint(0, len(dates) - 1, 20)
for p in duplicate_positions:
dates[p + 1] = dates[p]
df = DataFrame(np.random.randn(len(dates), 4),
index=dates,
columns=list('ABCD'))
pos = n * 3
timestamp = df.index[pos]
assert timestamp in df.index
# it works!
df.loc[timestamp]
assert len(df.loc[[timestamp]]) > 0
finally:
_index._SIZE_CUTOFF = old_cutoff
def test_indexing_unordered(self):
# GH 2437
rng = date_range(start='2011-01-01', end='2011-01-15')
ts = Series(np.random.rand(len(rng)), index=rng)
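# build a deliberately out-of-order copy of ts by concatenating blocks of the index in shuffled order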
ts2 = pd.concat([ts[0:4], ts[-4:], ts[4:-4]])
for t in ts.index:
# TODO: unused?
s = str(t) # noqa
expected = ts[t]
result = ts2[t]
assert expected == result
# GH 3448 (ranges)
def compare(slobj):
result = ts2[slobj].copy()
result = result.sort_index()
expected = ts[slobj]
assert_series_equal(result, expected)
compare(slice('2011-01-01', '2011-01-15'))
compare(slice('2010-12-30', '2011-01-15'))
compare(slice('2011-01-01', '2011-01-16'))
# partial ranges
compare(slice('2011-01-01', '2011-01-6'))
compare(slice('2011-01-06', '2011-01-8'))
compare(slice('2011-01-06', '2011-01-12'))
# single values
result = ts2['2011'].sort_index()
expected = ts['2011']
assert_series_equal(result, expected)
# diff freq
rng = date_range(datetime(2005, 1, 1), periods=20, freq='M')
ts = Series(np.arange(len(rng)), index=rng)
ts = ts.take(np.random.permutation(20))
result = ts['2005']
for t in result.index:
assert t.year == 2005
def test_indexing(self):
idx = date_range("2001-1-1", periods=20, freq='M')
ts = Series(np.random.rand(len(idx)), index=idx)
# getting
# GH 3070, make sure semantics work on Series/Frame
expected = ts['2001']
expected.name = 'A'
df = DataFrame(dict(A=ts))
result = df['2001']['A']
assert_series_equal(expected, result)
# setting
ts['2001'] = 1
expected = ts['2001']
expected.name = 'A'
df.loc['2001', 'A'] = 1
result = df['2001']['A']
assert_series_equal(expected, result)
# GH3546 (not including times on the last day)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:00',
freq='H')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = date_range(start='2013-05-31 00:00', end='2013-05-31 23:59',
freq='S')
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013-05']
assert_series_equal(expected, ts)
idx = [Timestamp('2013-05-31 00:00'),
Timestamp(datetime(2013, 5, 31, 23, 59, 59, 999999))]
ts = Series(lrange(len(idx)), index=idx)
expected = ts['2013']
assert_series_equal(expected, ts)
# GH14826, indexing with a seconds resolution string / datetime object
df = DataFrame(np.random.rand(5, 5),
columns=['open', 'high', 'low', 'close', 'volume'],
index=date_range('2012-01-02 18:01:00',
periods=5, tz='US/Central', freq='s'))
expected = df.loc[[df.index[2]]]
# this is a single date, so will raise
pytest.raises(KeyError, df.__getitem__, '2012-01-02 18:01:02', )
pytest.raises(KeyError, df.__getitem__, df.index[2], )
class TestDatetimeIndexing(object):
"""
Also test support for datetime64[ns] in Series / DataFrame
"""
def setup_method(self, method):
dti = DatetimeIndex(start=datetime(2005, 1, 1),
end=datetime(2005, 1, 10), freq='Min')
self.series = Series(np.random.rand(len(dti)), dti)
def test_fancy_getitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
assert s[48] == 48
assert s['1/2/2009'] == 48
assert s['2009-1-2'] == 48
assert s[datetime(2009, 1, 2)] == 48
assert s[lib.Timestamp(datetime(2009, 1, 2))] == 48
pytest.raises(KeyError, s.__getitem__, '2009-1-3')
assert_series_equal(s['3/6/2009':'2009-06-05'],
s[datetime(2009, 3, 6):datetime(2009, 6, 5)])
def test_fancy_setitem(self):
dti = DatetimeIndex(freq='WOM-1FRI', start=datetime(2005, 1, 1),
end=datetime(2010, 1, 1))
s = Series(np.arange(len(dti)), index=dti)
s[48] = -1
assert s[48] == -1
s['1/2/2009'] = -2
assert s[48] == -2
s['1/2/2009':'2009-06-05'] = -3
assert (s[48:54] == -3).all()
def test_dti_snap(self):
dti = DatetimeIndex(['1/1/2002', '1/2/2002', '1/3/2002', '1/4/2002',
'1/5/2002', '1/6/2002', '1/7/2002'], freq='D')
res = dti.snap(freq='W-MON')
exp = date_range('12/31/2001', '1/7/2002', freq='w-mon')
exp = exp.repeat([3, 4])
assert (res == exp).all()
res = dti.snap(freq='B')
exp = date_range('1/1/2002', '1/7/2002', freq='b')
exp = exp.repeat([1, 1, 1, 2, 2])
assert (res == exp).all()
def test_dti_reset_index_round_trip(self):
dti = DatetimeIndex(start='1/1/2001', end='6/1/2001', freq='D')
d1 = DataFrame({'v': np.random.rand(len(dti))}, index=dti)
d2 = d1.reset_index()
assert d2.dtypes[0] == np.dtype('M8[ns]')
d3 = d2.set_index('index')
assert_frame_equal(d1, d3, check_names=False)
# #2329
stamp = datetime(2012, 11, 22)
df = DataFrame([[stamp, 12.1]], columns=['Date', 'Value'])
df = df.set_index('Date')
assert df.index[0] == stamp
assert df.reset_index()['Date'][0] == stamp
def test_series_set_value(self):
# #1561
dates = [datetime(2001, 1, 1), datetime(2001, 1, 2)]
index = DatetimeIndex(dates)
s = Series().set_value(dates[0], 1.)
s2 = s.set_value(dates[1], np.nan)
exp = Series([1., np.nan], index=index)
assert_series_equal(s2, exp)
# s = Series(index[:1], index[:1])
# s2 = s.set_value(dates[1], index[1])
# assert s2.values.dtype == 'M8[ns]'
@slow
def test_slice_locs_indexerror(self):
times = [datetime(2000, 1, 1) + timedelta(minutes=i * 10)
for i in range(100000)]
s = Series(lrange(100000), times)
s.loc[datetime(1900, 1, 1):datetime(2100, 1, 1)]
def test_slicing_datetimes(self):
# GH 7523
# unique
df = DataFrame(np.arange(4., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
# duplicates
df = pd.DataFrame(np.arange(5., dtype='float64'),
index=[datetime(2001, 1, i, 10, 00)
for i in [1, 2, 2, 3, 4]])
result = df.loc[datetime(2001, 1, 1, 10):]
assert_frame_equal(result, df)
result = df.loc[:datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 10):datetime(2001, 1, 4, 10)]
assert_frame_equal(result, df)
result = df.loc[datetime(2001, 1, 1, 11):]
expected = df.iloc[1:]
assert_frame_equal(result, expected)
result = df.loc['20010101 11':]
assert_frame_equal(result, expected)
def test_frame_datetime64_duplicated(self):
dates = date_range('2010-07-01', end='2010-08-05')
tst = DataFrame({'symbol': 'AAA', 'date': dates})
result = tst.duplicated(['date', 'symbol'])
assert (-result).all()
tst = DataFrame({'date': dates})
result = tst.duplicated()
assert (-result).all()
class TestNatIndexing(object):
def setup_method(self, method):
self.series = Series(date_range('1/1/2000', periods=10))
# ---------------------------------------------------------------------
# NaT support
def test_set_none_nan(self):
self.series[3] = None
assert self.series[3] is NaT
self.series[3:5] = None
assert self.series[4] is NaT
self.series[5] = np.nan
assert self.series[5] is NaT
self.series[5:7] = np.nan
assert self.series[6] is NaT
def test_nat_operations(self):
# GH 8617
s = Series([0, pd.NaT], dtype='m8[ns]')
exp = s[0]
assert s.median() == exp
assert s.min() == exp
assert s.max() == exp
def test_round_nat(self):
# GH14940
s = Series([pd.NaT])
expected = Series(pd.NaT)
for method in ["round", "floor", "ceil"]:
round_method = getattr(s.dt, method)
for freq in ["s", "5s", "min", "5min", "h", "5h"]:
assert_series_equal(round_method(freq), expected)
| mit |
Bioinformatics-Support-Unit/python-scripts | zic1/zic1_correlation.py | 1 | 2362 | import sys
from scipy.stats.stats import pearsonr
import matplotlib
from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
import matplotlib.mlab as mlab
def main(f):
fh = open(f, 'r')
zic1_expression = []
other_expression = {}
for line in fh:
tokens = line.split('\t')
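# rows starting with probeset 206373_at (treated as ZIC1 throughout this script) supply the reference profile;
# columns at positions where (i-1) % 4 == 0 are assumed to hold the normalized expression values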
if line.startswith('206373_at'):
i = 0
while i < len(tokens):
if (i-1) % 4 == 0:
zic1_expression.append(tokens[i])
i+=1
zic1_expression = map(float, zic1_expression)
elif line.startswith('Scan'):
pass
else:
other_expression[tokens[0]] = []
i = 0
while i < len(tokens):
if (i-1) % 4 == 0:
other_expression[tokens[0]].append(tokens[i])
i+=1
other_expression[tokens[0]] = map(float, other_expression[tokens[0]])
plotting(zic1_expression, other_expression)
def plotting(zic1,comparators):
"""docstring for plotting"""
from mapping import probe_map
for key in comparators.keys():
corr = pearsonr(zic1, comparators[key])
#the string of correlation stats
s = 'R = '+str(corr[0])+'\nP = '+str(corr[1])
# Create a figure with size 6 x 6 inches.
fig = Figure(figsize=(6,6))
# Create a canvas and add the figure to it.
canvas = FigureCanvas(fig)
# Create a subplot.
ax = fig.add_subplot(111)
# Set the title.
ax.set_title(s,fontsize=10)
# Set the X Axis label.
ax.set_xlabel('Samples',fontsize=8)
# Set the Y Axis label.
ax.set_ylabel('Normalized Expression',fontsize=8)
# Display Grid.
ax.grid(True,linestyle='-',color='0.75')
# Plot both expression profiles (line plots with markers).
ax.plot(range(1,25), zic1, 'go-', label=probe_map['206373_at'])
ax.plot(range(1,25), comparators[key], 'r^-', label=probe_map[key])
# add the legend
ax.legend()
#ax.text(0.1,max(zic1),s)
# Save the generated figure to a PNG file.
canvas.print_figure('correlations/'+key+'.png',dpi=500)
if __name__ == '__main__':
if sys.argv[1]:
main(sys.argv[1])
else:
main('processed_filtered.txt')
| mit |
chaluemwut/fbserver | venv/lib/python2.7/site-packages/sklearn/decomposition/tests/test_nmf.py | 33 | 6189 | import numpy as np
from scipy import linalg
from sklearn.decomposition import nmf
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
random_state = np.random.mtrand.RandomState(0)
@raises(ValueError)
def test_initialize_nn_input():
"""Test NNDSVD behaviour on negative input"""
nmf._initialize_nmf(-np.ones((2, 2)), 2)
def test_initialize_nn_output():
"""Test that NNDSVD does not return negative values"""
data = np.abs(random_state.randn(10, 10))
for var in (None, 'a', 'ar'):
W, H = nmf._initialize_nmf(data, 10, random_state=0)
assert_false((W < 0).any() or (H < 0).any())
def test_initialize_close():
"""Test NNDSVD error
Test that _initialize_nmf error is less than the standard deviation of the
entries in the matrix.
"""
A = np.abs(random_state.randn(10, 10))
W, H = nmf._initialize_nmf(A, 10)
error = linalg.norm(np.dot(W, H) - A)
sdev = linalg.norm(A - A.mean())
assert_true(error <= sdev)
def test_initialize_variants():
"""Test NNDSVD variants correctness
Test that the variants 'a' and 'ar' differ from basic NNDSVD only where
the basic version has zeros.
"""
data = np.abs(random_state.randn(10, 10))
W0, H0 = nmf._initialize_nmf(data, 10, variant=None)
Wa, Ha = nmf._initialize_nmf(data, 10, variant='a')
War, Har = nmf._initialize_nmf(data, 10, variant='ar', random_state=0)
for ref, evl in ((W0, Wa), (W0, War), (H0, Ha), (H0, Har)):
assert_true(np.allclose(evl[ref != 0], ref[ref != 0]))
@raises(ValueError)
def test_projgrad_nmf_fit_nn_input():
"""Test model fit behaviour on negative input"""
A = -np.ones((2, 2))
m = nmf.ProjectedGradientNMF(n_components=2, init=None, random_state=0)
m.fit(A)
def test_projgrad_nmf_fit_nn_output():
"""Test that the decomposition does not contain negative values"""
A = np.c_[5 * np.ones(5) - np.arange(1, 6),
5 * np.ones(5) + np.arange(1, 6)]
for init in (None, 'nndsvd', 'nndsvda', 'nndsvdar'):
model = nmf.ProjectedGradientNMF(n_components=2, init=init,
random_state=0)
transf = model.fit_transform(A)
assert_false((model.components_ < 0).any() or
(transf < 0).any())
def test_projgrad_nmf_fit_close():
"""Test that the fit is not too far away"""
pnmf = nmf.ProjectedGradientNMF(5, init='nndsvda', random_state=0)
X = np.abs(random_state.randn(6, 5))
assert_less(pnmf.fit(X).reconstruction_err_, 0.05)
def test_nls_nn_output():
"""Test that NLS solver doesn't return negative values"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, -A), A.T, A, 0.001, 100)
assert_false((Ap < 0).any())
def test_nls_close():
"""Test that the NLS results should be close"""
A = np.arange(1, 5).reshape(1, -1)
Ap, _, _ = nmf._nls_subproblem(np.dot(A.T, A), A.T, np.zeros_like(A),
0.001, 100)
assert_true((np.abs(Ap - A) < 0.01).all())
def test_projgrad_nmf_transform():
"""Test that NMF.transform returns close values
(transform uses scipy.optimize.nnls for now)
"""
A = np.abs(random_state.randn(6, 5))
m = nmf.ProjectedGradientNMF(n_components=5, init='nndsvd', random_state=0)
transf = m.fit_transform(A)
assert_true(np.allclose(transf, m.transform(A), atol=1e-2, rtol=0))
def test_n_components_greater_n_features():
"""Smoke test for the case of more components than features."""
A = np.abs(random_state.randn(30, 10))
nmf.ProjectedGradientNMF(n_components=15, sparseness='data',
random_state=0).fit(A)
def test_projgrad_nmf_sparseness():
"""Test sparseness
Test that sparsity constraints actually increase sparseness in the
part where they are applied.
"""
A = np.abs(random_state.randn(10, 10))
m = nmf.ProjectedGradientNMF(n_components=5, random_state=0).fit(A)
data_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='data',
random_state=0).fit(A).data_sparseness_
comp_sp = nmf.ProjectedGradientNMF(n_components=5, sparseness='components',
random_state=0).fit(A).comp_sparseness_
assert_greater(data_sp, m.data_sparseness_)
assert_greater(comp_sp, m.comp_sparseness_)
def test_sparse_input():
"""Test that sparse matrices are accepted as input"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(10, 10))
A[:, 2 * np.arange(5)] = 0
T1 = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999).fit_transform(A)
A_sparse = csc_matrix(A)
pg_nmf = nmf.ProjectedGradientNMF(n_components=5, init='random',
random_state=999)
T2 = pg_nmf.fit_transform(A_sparse)
assert_array_almost_equal(pg_nmf.reconstruction_err_,
linalg.norm(A - np.dot(T2, pg_nmf.components_),
'fro'))
assert_array_almost_equal(T1, T2)
# same with sparseness
T2 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A_sparse)
T1 = nmf.ProjectedGradientNMF(
n_components=5, init='random', sparseness='data',
random_state=999).fit_transform(A)
def test_sparse_transform():
"""Test that transform works on sparse data. Issue #2124"""
from scipy.sparse import csc_matrix
A = np.abs(random_state.randn(5, 4))
A[A > 1.0] = 0
A = csc_matrix(A)
model = nmf.NMF()
A_fit_tr = model.fit_transform(A)
A_tr = model.transform(A)
# This solver seems pretty inconsistent
assert_array_almost_equal(A_fit_tr, A_tr, decimal=2)
if __name__ == '__main__':
import nose
nose.run(argv=['', __file__])
| apache-2.0 |
AdamRTomkins/libSpineML | libSpineML/smlUtils.py | 1 | 10142 | """
A script to convert the drosophila connectome into SpineML
This builds upon the pure connectome data, adding the required inferred network components:
# Install libSpineML from source
# https://github.com/AdamRTomkins/libSpineML
"""
from __future__ import division
from libSpineML import smlExperiment as exp
from libSpineML import smlNetwork as net
from libSpineML import smlComponent as com
import csv
import sys
import cStringIO
import graphviz as gv
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import copy
neuron_fieldnames = ['neuron_name', 'innv_neuropil', 'mem_model', 'resting_pot', 'reset_pot', 'threshold_pot', 'rfact_period', 'Cm', 'tm']
neuron_property_list = ['resting_pot', 'reset_pot', 'threshold_pot', 'rfact_period', 'Cm', 'tm']
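# Column names for the synapse file: 'pre-neuron' and 'post-neuron' are required by process_files below;
# the trailing 'weight' column is an assumption about the input format (a default of 1.0 is used when it is absent)
synapse_fieldnames = ['pre-neuron', 'post-neuron', 'weight']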
default_neuron_models ={}
default_neuron_models['LIF'] = {'resting_pot' :-60,
'reset_pot' :-70,
'threshold_pot' :-10,
'rfact_period' :0,
'Cm' :10,
'tm' :10
}
default_neuron_models['ESN'] = {}
def process_files(neuron_file,synapse_file):
""" Convert the neuron and synapse files into populations, projections and neurons """
# Process the text files
neuron_reader = csv.DictReader(open(neuron_file), fieldnames=neuron_fieldnames,delimiter=' ')
synapse_reader = csv.DictReader(open(synapse_file), fieldnames=synapse_fieldnames,delimiter=' ')
neurons = {}
populations = {}
projections = {}
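# Structures built from the input files:
#   neurons:     neuron_name -> field dict, plus an 'index' giving its position inside its LPU population
#   populations: LPU (innervated neuropil) name -> ordered list of neuron names
#   projections: pre-LPU -> post-LPU -> list of (pre_index, post_index, weight) tuples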
for row in neuron_reader:
lpu = row['innv_neuropil']
name = row['neuron_name']
if lpu not in populations:
populations[lpu] = [name]
else:
populations[lpu].append(name)
neurons[name] = row
neurons[name]['index']= len(populations[lpu])-1
for row in synapse_reader:
pre_neuron = row['pre-neuron']
post_neuron = row['post-neuron']
# get the LPU of the pre neuron
pre_lpu = neurons[pre_neuron]['innv_neuropil']
# get the LPU index of the pre neuron
pre_index = neurons[pre_neuron]['index']
# get the LPU of the post neuron
post_lpu = neurons[post_neuron]['innv_neuropil']
# get the LPU index of the post neuron
post_index = neurons[post_neuron]['index']
if pre_lpu not in projections:
projections[pre_lpu] = {}
if post_lpu not in projections[pre_lpu]:
projections[pre_lpu][post_lpu] = []
# 'weight' is an assumed column (see synapse_fieldnames); fall back to 1.0 when it is missing
weight = float(row['weight']) if row.get('weight') not in (None, '') else 1.0
projections[pre_lpu][post_lpu].append((pre_index, post_index, weight))
return (neurons, populations, projections)
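# Example usage sketch (the file names below are placeholders, not part of the original script):
#   neurons, populations, projections = process_files('neurons.txt', 'synapses.txt')
#   create_spineml_network(neurons, populations, projections, output=True, output_filename='model.xml')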
def create_spineml_network(neurons, populations, projections,
output=False, output_filename='model.xml', project_name='drosophila'):
""" convert projections and populations into a SpineML network """
write_output = output  # preserve the caller's flag; the name 'output' is reused for the result dict below
output = {
'network' : {
'name':None,
'xml' : None
},
'components' : []
}
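# 'network' is a placeholder for the serialized network (filled in further down);
# 'components' accumulates the component XML file names (neuron body, post-synapse, weight update) referenced by the network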
# create the network SpineML type
network = net.SpineMLType()
# for each population, create a Population type
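# each LPU becomes one population: a neuron set sized to the LPU plus one projection per downstream LPU,
# with one synapse per individual connection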
for p in populations:
population = net.PopulationType()
# create a neuron type
neuron = net.NeuronType()
n = neurons.keys()[0] # The model neuron to use as the template
# Build this Neuron Sets Property list
# Currently all fixed value # TODO
        for prop_name in default_neuron_models['LIF'].keys():  # avoid shadowing the numpy alias `np`
            # WIP: specify based on model
            # Get non-default values
            value = net.FixedValueType(default_neuron_models[neurons[n]['mem_model']][prop_name]) # Currently using a fixed value, should use valuelist
            name = prop_name
dimension = '?'#Todo Add dimensions to property list
neuron_property = net.PropertyType()
neuron_property.set_name(name)
neuron_property.set_dimension(dimension)
neuron_property.set_AbstractValue(value)
neuron.add_Property(neuron_property)
neuron.set_name(p)
component_file_name = neurons[n]['mem_model']+'.xml'
neuron.set_url(component_file_name)
output['components'].append(component_file_name)
neuron.set_size(len(populations[p]))
# Assign to population
population.set_Neuron(neuron)
# create a projection
if p in projections:
for cn, destination in enumerate(projections[p]):
projection = net.ProjectionType(destination)
# Add synapses
#For every connection, we will create a new synapse
for index, connection in enumerate(projections[p][destination]):
synapse_file_name = 'CurrExp.xml'
# Create a PostSynapse
postSynapse = net.PostSynapseType(
name='CurrExp',
url = synapse_file_name,
Property=
[
net.PropertyType(name='tau_syn', AbstractValue=net.FixedValueType(value=10))
],
input_src_port=None,
input_dst_port=None,
output_src_port=None,
output_dst_port=None
)
output['components'].append(synapse_file_name)
## Create Connectivity
connection_list = net.ConnectionListType()
connection_type = net.ConnectionType(connection[0],connection[1],0) # zero delay
connection_list.add_Connection(connection_type)
weightValue = net.ValueType(index=int(index),value=float(connection[2]))
update_file_name = 'FixedWeight.xml'
# Create a PostSynapse
weightUpdate = net.WeightUpdateType(
name='"%s to %s Synapse %s weight_update' % (p,destination,cn),
url=update_file_name,
input_src_port="spike",
input_dst_port="spike",
feedback_src_port=None,
feedback_dst_port=None
)
output['components'].append(update_file_name)
prop = net.PropertyType(name='w',dimension="?")
prop.set_AbstractValue(weightValue)
io = cStringIO.StringIO()
prop.export(io,0)
st = io.getvalue()
weightUpdate.set_Property([prop])
io = cStringIO.StringIO()
weightUpdate.export(io,0)
st = io.getvalue()
# Create Synapse
synapse = net.SynapseType(
AbstractConnection=connection_list,
WeightUpdate=weightUpdate,
PostSynapse=postSynapse
)
projection.add_Synapse(synapse)
population.add_Projection(projection)
# add population to the network
network.add_Population(population)
# Write out network to xml
io = cStringIO.StringIO()
network.export(io,0)
network = io.getvalue()
# Cleanup Replace Abstract objects with non_abstract
subs = {
"AbstractConnection":"ConnectionList",
"AbstractValue":"FixedValue",
"Population":"LL:Population",
"Neuron":"LL:Neuron",
"Projection":"LL:Projection",
"<Synapse>":"<LL:Synapse>", # REQURED DUE TO PostSynapse Overlap
"</Synapse>":"</LL:Synapse>",
"<PostSynapse":"<LL:PostSynapse", # REQURED DUE TO PostSynapse Overlap
"</PostSynapse>":"</LL:PostSynapse>",
"ConnectionList": "LL:ConnectionList",
"WeightUpdate":"LL:WeightUpdate",
'<SpineMLType>':
'<LL:SpineML xsi:schemaLocation="http://www.shef.ac.uk/SpineMLLowLevelNetworkLayer SpineMLLowLevelNetworkLayer.xsd http://www.shef.ac.uk/SpineMLNetworkLayer SpineMLNetworkLayer.xsd" name="%s">' % project_name,
'</SpineMLType>':'</LL:SpineML>'
}
for k in subs:
network = network.replace(k,subs[k])
if output:
with open(output_filename, 'w') as f:
f.write(network)
    # Create Output SpineML JSON representation
output['network']['name'] = 'model.xml'
output['network']['xml'] = network
# WIP: Add each component xml too
components = set(output['components'])
output['components'] = list(components)
return output
def create_graphviz_graph(populations,projections):
""" convert the projections matrix to a svg graph """
g1 = gv.Digraph(format='svg')
for lpu in populations.keys():
if lpu.lower() == lpu:
g1.node(lpu)
for pre in projections.keys():
if pre.lower() == pre:
for post in projections[pre]:
if post.lower() == post:
if len(projections[pre][post]) > 100:
g1.edge(pre, post,weight = str(len(projections[pre][post])))
filename = g1.render(filename='left_hemisphere')
def create_networkx_graph(populations,projections,prune=10):
""" convert the projections matrix to a svg graph """
network = nx.Graph()
lpus = populations.keys()
for lpu in lpus:
network.add_node(lpu)
for pre in projections.keys():
for post in projections[pre]:
if len(projections[pre][post]) > prune:
network.add_edge(pre, post, weight=1.0/len(projections[pre][post]))
return network
| gpl-3.0 |
yaukwankiu/twstocks | mark1.py | 1 | 9719 | # -*- coding: utf8 -*-
############################
# imports
import time
import datetime
import urllib2
import re
import os
import pickle
import numpy as np
import matplotlib.pyplot as plt
############################
# defining the parameters
currentPriceRegex = re.compile(r'(?<=\<td\ align\=\"center\"\ bgcolor\=\"\#FFFfff\"\ nowrap\>\<b\>)\d*\.\d*(?=\<\/b\>\<\/td\>)')
#companyNameRegex = re.compile( ur'(?<=\<TITLE\>).+(?=-公司資料-奇摩股市\<\/TITLE\>)',re.UNICODE) #doesn't work somehow
companyNameRegex = re.compile( ur'\<TITLE.+TITLE\>', re.UNICODE)
stockSymbolsList = []
outputFolder = "c:/chen chen/stocks/"
stockSymbolsFile='stockSymbols.pydump'
pricesFolder = outputFolder+ "prices/"
stocksFolder = outputFolder +"stocks/"
############################
#
############################
# defining the classes
class stock:
def __init__(self, symbol):
"""e.g.
https://tw.stock.yahoo.com/d/s/company_1473.html
"""
symbol= ('000'+str(symbol))[-4:]
self.symbol = symbol
self.yahooFrontPageUrl = 'https://tw.stock.yahoo.com/d/s/company_' + symbol + '.html'
self.yahooCurrentPageUrl = 'https://tw.stock.yahoo.com/q/q?s=' + symbol
# get some basic information from the front page
yahooFrontPage = urllib2.urlopen(self.yahooFrontPageUrl)
raw_text = yahooFrontPage.read()
self.name = companyNameRegex.findall(raw_text)[0]
self.name = self.name[7:-26]
self.pricesList = []
def __call__(self):
outputString = ""
#outputString += self.symbol + '\n' #unnecessary
outputString += self.name + '\n'
outputString += self.yahooCurrentPageUrl + '\n'
if self.pricesList != []:
outputString += '\n'.join([time.asctime(time.localtime((v['pingTime'])))+ ": $" + str(v['price']) for v in self.pricesList])
print outputString
def openYahooCurrentPage(self):
self.yahooCurrentPage = urllib2.urlopen(self.yahooCurrentPageUrl)
def getCurrentPrice(self, verbose=True, showResponseTime=True):
self.openYahooCurrentPage()
t0 = time.time()
raw_text = self.yahooCurrentPage.read()
t1 = time.time()
self.yahooCurrentPage.close()
currentPrice = currentPriceRegex.findall(raw_text)[0]
self.currentPricePingTime = t0
self.currentPricePingReturnTime = t1
self.currentPrice = currentPrice
if verbose:
print "Time: ", time.asctime(time.localtime(t0)),
if showResponseTime:
print "(response time: ", t1-t0, ")",
#print self.symbol, #unnecessary
print self.name, "Price:", currentPrice
self.pricesList.append({'price' : currentPrice,
'pingTime' : t0,
'responseTime' : t1-t0,
})
return currentPrice, t0, t1-t0
def getPriceList(self, throttle=1, repetitions=-999, verbose=True):
count = 0
while count!= repetitions:
count +=1
p, t0, dt = self.getCurrentPrice(verbose=verbose)
self.pricesList.append({'price' : p,
'pingTime' : t0,
'responseTime' : dt,
})
if throttle>0:
time.sleep(throttle)
def writeCurrentPrice(self, verbose=True):
P = self.pricesList[-1] # the last one
currentPrice = P['price']
t0 = P['pingTime']
dt = P['responseTime']
outputString= ''
if not os.path.exists(pricesFolder+self.name+'.dat'):
outputString = "#time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+self.name+'.dat','a').write(outputString)
if verbose:
print self.name, outputString
def loadPrices(self, pricesPath="", eraseOld=True):
if eraseOld:
self.pricesList = []
if pricesPath == "":
pricesPath = pricesFolder + self.name + ".dat"
if not os.path.exists(pricesPath):
return 0
raw_text = open(pricesPath, 'r').read()
x = raw_text.split('\n')[1:]
xx = [v.split(',') for v in x]
for u in xx:
print u
if len(u) ==2:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0] ),
'responseTime': 0
})
elif len(u) ==3:
self.pricesList.append({'price' : float(u[1]),
'pingTime' : float(u[0]) ,
'responseTime': float(u[2])
})
def load(self, *args, **kwargs):
self.loadPrices(*args, **kwargs)
def plot(self, display=True):
y = [v['price'] for v in self.pricesList]
x = [v['pingTime'] for v in self.pricesList]
plt.plot(x,y)
plt.title(self.symbol)
if display:
plt.show()
############################
# defining the functions
def getStockSymbolsList1():
for N in range(9999):
try:
s = stock(N)
stockSymbolsList.append(N)
print N, s.name, "<-------------added"
except:
print N, "doesn't exist!"
    return stockSymbolsList
def getStockSymbolsList2(url="http://sheet1688.blogspot.tw/2008/11/blog-post_18.html"):
raw_text = urllib2.urlopen(url).read()
symbols = re.findall(ur'(?<=num\>)\d\d\d\d(?=\<\/td\>)', raw_text, re.UNICODE)
symbols.sort()
pickle.dump(symbols, open(outputFolder+stockSymbolsFile,'w'))
stockSymbolsList = symbols
return symbols
def loadStockSymbolsList(path=outputFolder+stockSymbolsFile):
stockSymbolsList = pickle.load(open(path,'r'))
return stockSymbolsList
def makeStocksList(inPath=outputFolder+stockSymbolsFile,
outputFolder=stocksFolder):
symbols = loadStockSymbolsList()
for N in symbols:
try:
st = stock(N)
pickle.dump(st, open(outputFolder+st.name+'.pydump','w'))
print st.name, "-->", outputFolder+st.name+'.pydump'
except:
print "stock symbol", N, "not found!!!!"
def loadStocksList(inputFolder=stocksFolder):
stocksList = []
L = os.listdir(inputFolder)
L.sort(key=lambda v: v[-13:-7])
for fileName in L:
stocksList.append(pickle.load(open(inputFolder+fileName,'r')))
return stocksList
############################
# test run
def main0():
for st in stocksList:
st()
st.getPriceList(repetitions=5, throttle=0.3)
def main1(throttle=0.5):
for st in stocksList:
st.load()
st()
print "=================="
    time0 = time.time()
    while True:
        # write the collected prices to disk roughly every 600 seconds
if time.time() - time0 > 600:
for st in stocksList:
st()
try:
st.writeCurrentPrice()
except:
print "writeCurrentPrice() -- error!"
time0 = time.time()
for st in stocksList:
st.getCurrentPrice()
time.sleep(throttle)
def main2():
print "=================="
print time.asctime(time.localtime(time.time()))
#symbols = loadStockSymbolsList()
while True:
stocks = loadStocksList() #clean up every day
while time.localtime(time.time()).tm_wday > 4: #weekends
pass
while time.localtime(time.time()).tm_hour<9:
pass
while (time.localtime(time.time()).tm_hour >=9 and \
time.localtime(time.time()).tm_hour < 13) or \
(time.localtime(time.time()).tm_hour==13 and time.localtime(time.time()).tm_min<=30):
for st in stocks:
try:
currentPrice, t0, dt = st.getCurrentPrice()
if not os.path.exists(pricesFolder+st.name+'.dat'):
outputString = "time, price, response time\n"
else:
outputString = ""
outputString += str(t0) + ", " + str(currentPrice)
if dt>1:
outputString += ", " + str(int(dt))
outputString += '\n'
open(pricesFolder+st.name+'.dat','a').write(outputString)
time.sleep(.5)
except:
print "ERROR!! <------ ", st.name
T = time.localtime()
print time.asctime(T)
#if T.tm_hour < 9 or T.tm_hour>=13 and T.tm_min>=30:
# time.sleep(86400 - (13-9)*3600 - 30*60)
print "End of the trading session of the day!"
def main(*args, **kwargs):
main1(*args, **kwargs)
if __name__=="__main__":
############################
# constructing examples
tainam = stock(symbol='1473')
chenpinsen = stock(symbol=2926)
ganung = stock(symbol=2374)
tungyang = stock(symbol=1319)
htc = stock(2498)
prince = stock(2511)
stocksList = [tainam, chenpinsen, ganung, tungyang, htc, prince]
##############################
# test run
main(60)
| cc0-1.0 |
yl565/statsmodels | statsmodels/tools/tests/test_grouputils.py | 31 | 11494 | import numpy as np
import pandas as pd
from statsmodels.tools.grouputils import Grouping
from statsmodels.tools.tools import categorical
from statsmodels.datasets import grunfeld, anes96
from pandas.util import testing as ptesting
class CheckGrouping(object):
def test_reindex(self):
# smoke test
self.grouping.reindex(self.grouping.index)
def test_count_categories(self):
self.grouping.count_categories(level=0)
np.testing.assert_equal(self.grouping.counts, self.expected_counts)
def test_sort(self):
# data frame
sorted_data, index = self.grouping.sort(self.data)
expected_sorted_data = self.data.sort_index()
ptesting.assert_frame_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.DataFrame))
np.testing.assert_(not index.equals(self.grouping.index))
# make sure it copied
if hasattr(sorted_data, 'equals'): # newer pandas
np.testing.assert_(not sorted_data.equals(self.data))
# 2d arrays
sorted_data, index = self.grouping.sort(self.data.values)
np.testing.assert_array_equal(sorted_data,
expected_sorted_data.values)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
# 1d series
series = self.data[self.data.columns[0]]
sorted_data, index = self.grouping.sort(series)
expected_sorted_data = series.sort_index()
ptesting.assert_series_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, pd.Series))
if hasattr(sorted_data, 'equals'):
np.testing.assert_(not sorted_data.equals(series))
# 1d array
array = series.values
sorted_data, index = self.grouping.sort(array)
expected_sorted_data = series.sort_index().values
np.testing.assert_array_equal(sorted_data, expected_sorted_data)
np.testing.assert_(isinstance(sorted_data, np.ndarray))
def test_transform_dataframe(self):
names = self.data.index.names
transformed_dataframe = self.grouping.transform_dataframe(
self.data,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
if len(names) > 1:
transformed_dataframe = self.grouping.transform_dataframe(
self.data, lambda x : x.mean(),
level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_dataframe,
expected.values)
def test_transform_array(self):
names = self.data.index.names
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(),
level=0)
expected = self.data.reset_index().groupby(names[0]
).apply(lambda x : x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
if len(names) > 1:
transformed_array = self.grouping.transform_array(
self.data.values,
lambda x : x.mean(), level=1)
expected = self.data.reset_index().groupby(names[1]
).apply(lambda x :
x.mean())[
self.data.columns]
np.testing.assert_array_equal(transformed_array,
expected.values)
def test_transform_slices(self):
names = self.data.index.names
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=0)
expected = self.data.reset_index().groupby(names[0]).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
if len(names) > 1:
transformed_slices = self.grouping.transform_slices(
self.data.values,
lambda x, idx : x.mean(0),
level=1)
expected = self.data.reset_index().groupby(names[1]
).mean()[
self.data.columns]
np.testing.assert_allclose(transformed_slices, expected.values,
rtol=1e-12, atol=1e-25)
def test_dummies_groups(self):
# smoke test, calls dummy_sparse under the hood
self.grouping.dummies_groups()
if len(self.grouping.group_names) > 1:
self.grouping.dummies_groups(level=1)
def test_dummy_sparse(self):
data = self.data
self.grouping.dummy_sparse()
expected = categorical(data.index.get_level_values(0).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(), expected)
if len(self.grouping.group_names) > 1:
self.grouping.dummy_sparse(level=1)
expected = categorical(data.index.get_level_values(1).values,
drop=True)
np.testing.assert_equal(self.grouping._dummies.toarray(),
expected)
class TestMultiIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
multi_index_data = grun_data.set_index(['firm', 'year'])
multi_index_panel = multi_index_data.index
cls.grouping = Grouping(multi_index_panel)
cls.data = multi_index_data
cls.expected_counts = [20] * 11
class TestIndexGrouping(CheckGrouping):
@classmethod
def setupClass(cls):
grun_data = grunfeld.load_pandas().data
index_data = grun_data.set_index(['firm'])
index_group = index_data.index
cls.grouping = Grouping(index_group)
cls.data = index_data
cls.expected_counts = [20] * 11
def test_init_api():
# make a multi-index panel
grun_data = grunfeld.load_pandas().data
multi_index_panel = grun_data.set_index(['firm', 'year']).index
grouping = Grouping(multi_index_panel)
# check group_names
np.testing.assert_array_equal(grouping.group_names, ['firm', 'year'])
# check shape
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# check index_int
np.testing.assert_array_equal(grouping.labels,
[[ 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
5, 5, 5, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8,
8, 8, 8, 8, 8, 8, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
4, 4, 4, 4, 4, 4, 4, 4, 4, 2, 2, 2, 2, 2, 2, 2, 2,
2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 7, 7,
7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
7, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9,
9, 9, 9, 9, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10, 10, 10, 10, 10, 10, 10, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 3, 3, 3, 3, 3, 3, 3,
3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13,
14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,
11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7,
8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4,
5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1,
2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12,
13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3, 4, 5, 6,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 0, 1, 2, 3,
4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]])
grouping = Grouping(multi_index_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
# make a multi-index grouping
anes_data = anes96.load_pandas().data
multi_index_groups = anes_data.set_index(['educ', 'income',
'TVnews']).index
grouping = Grouping(multi_index_groups)
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# make a list multi-index panel
list_panel = multi_index_panel.tolist()
grouping = Grouping(list_panel, names=['firms', 'year'])
np.testing.assert_array_equal(grouping.group_names, ['firms', 'year'])
np.testing.assert_array_equal(grouping.index_shape, (11, 20))
# make a list multi-index grouping
list_groups = multi_index_groups.tolist()
grouping = Grouping(list_groups, names=['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.group_names,
['educ', 'income', 'TVnews'])
np.testing.assert_array_equal(grouping.index_shape, (7, 24, 8))
# single-variable index grouping
index_group = multi_index_panel.get_level_values(0)
grouping = Grouping(index_group)
    # the original multi_index_panel had its name changed in place above
np.testing.assert_array_equal(grouping.group_names, ['firms'])
np.testing.assert_array_equal(grouping.index_shape, (220,))
# single variable list grouping
list_group = multi_index_panel.get_level_values(0).tolist()
grouping = Grouping(list_group)
np.testing.assert_array_equal(grouping.group_names, ["group0"])
np.testing.assert_array_equal(grouping.index_shape, 11*20)
# test generic group names
grouping = Grouping(list_groups)
np.testing.assert_array_equal(grouping.group_names,
['group0', 'group1', 'group2'])
| bsd-3-clause |
hainm/scikit-learn | examples/preprocessing/plot_function_transformer.py | 161 | 1949 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cross_validation import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
plt.scatter(X[:, 0], X[:, 1], c=y, s=50)
plt.show()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
s=50,
)
plt.show()
| bsd-3-clause |
JPGlaser/Tycho | src/tycho/read.py | 1 | 5586 | # Python Classes/Functions used to Import Tycho Datasets
# ------------------------------------- #
# Python Package Importing #
# ------------------------------------- #
# TO-DO: Add time back to the read state function for Tyler's code
# Importing Necessary System Packages
import math
import io
import os
import numpy as np
import matplotlib as plt
import random as rp
# Import the Amuse Base Packages
from amuse import datamodel
from amuse.units import nbody_system
from amuse.units import units
from amuse.units import constants
from amuse.datamodel import particle_attributes
from amuse.io import *
from amuse.lab import *
#from amuse.couple import multiples
# Import the Amuse Stellar Packages
from amuse.ic.kingmodel import new_king_model
from amuse.ic.kroupa import new_kroupa_mass_distribution
# Import cPickle/Pickle
try:
import pickle as pickle
except:
import pickle
# Tycho util import
from tycho import util
#from tycho import multiples2 as multiples
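# NOTE: read_state_from_file() and recover_crash() below rely on a `multiples` module
# (multiples.Multiples, multiples.root_index); one of the commented imports above has to
# be re-enabled (or `multiples` imported explicitly) before calling them.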
# ------------------------------------- #
# Defining Functions #
# ------------------------------------- #
def read_initial_state(file_prefix):
''' Reads in an initial state for the Tycho Module.
file_prefix: String Value for a Prefix to the Saved File
'''
# TODO: Also everything else in this function.
# First, Define the Directory where Initial State is Stored
file_dir = os.getcwd()+"/InitialState"
file_base = file_dir+"/"+file_prefix
# Second, Read the Master AMUSE Particle Set from a HDF5 File
file_format = "hdf5"
master_set = read_set_from_file(file_base+"_particles.hdf5", format=file_format, close_file=True)
# Third, unPickle the Initial Conditions Array
ic_file = open(file_base+"_ic.pkl", "rb")
ic_array = pickle.load(ic_file)
ic_file.close()
# Fourth, convert ic_array.total_smass and viral_radius from strings to floats
total_smass = float(ic_array.total_smass) | units.kg
viral_radius = float(ic_array.viral_radius) | units.m
# Fifth, Define the Master Set's Converter
converter = nbody_system.nbody_to_si(total_smass, viral_radius)
return master_set, ic_array, converter
# ------------------------------------ #
# RESTART FUNCTION #
# ------------------------------------ #
def read_state_from_file(restart_file, gravity_code, kep, SMALLN):
stars = read_set_from_file(restart_file+".stars.hdf5",'hdf5',version='2.0', close_file=True).copy()
stars_python = read_set_from_file(restart_file+".stars_python.hdf5",'hdf5',version='2.0', close_file=True).copy()
with open(restart_file + ".bookkeeping", "rb") as f:
bookkeeping = pickle.load(f)
f.close()
print(bookkeeping)
root_to_tree = {}
for root in stars:
if hasattr(root, 'components') and not root.components is None:
root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
gravity_code.particles.add_particles(stars)
# print bookkeeping['model_time']
# gravity_code.set_begin_time = bookkeeping['model_time']
multiples_code = multiples.Multiples(gravity_code, SMALLN, kep, gravity_constant=units.constants.G)
multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
multiples.root_index = bookkeeping['root_index']
multiples_code.root_to_tree = root_to_tree
# multiples_code.set_model_time = bookkeeping['model_time']
return stars_python, multiples_code
# ------------------------------------------ #
# RESTART CRASH FUNCTION #
# ------------------------------------------ #
def recover_crash(restart_file, gravity_code, kep, SMALLN):
# NEEDS SOME TENDER LOVE AND CARE
stars = read_set_from_file(restart_file+".stars.hdf5",'hdf5',version='2.0', close_file=True).copy()
stars_python = read_set_from_file(restart_file+".stars_python.hdf5",'hdf5',version='2.0', close_file=True).copy()
with open(restart_file + ".bookkeeping", "rb") as f:
bookkeeping = pickle.load(f)
f.close()
print(bookkeeping)
root_to_tree = {}
for root in stars:
if hasattr(root, 'components') and not root.components is None:
root_to_tree[root] = datamodel.trees.BinaryTreeOnParticle(root.components[0])
#gravity_code.particles.add_particles(stars)
#print bookkeeping['model_time']
gravity_code.set_begin_time = bookkeeping['model_time']
multiples_code = multiples.Multiples(gravity_code, SMALLN, kep, gravity_constant=units.constants.G)
multiples_code.neighbor_distance_factor = bookkeeping['neighbor_distance_factor']
multiples_code.neighbor_veto = bookkeeping['neighbor_veto']
multiples_code.multiples_external_tidal_correction = bookkeeping['multiples_external_tidal_correction']
multiples_code.multiples_integration_energy_error = bookkeeping['multiples_integration_energy_error']
multiples_code.multiples_internal_tidal_correction = bookkeeping['multiples_internal_tidal_correction']
multiples.root_index = bookkeeping['root_index']
multiples_code.root_to_tree = root_to_tree
#multiples_code.set_model_time = bookkeeping['model_time']
return bookkeeping['model_time'], multiples_code
| mit |
sdu-cfei/modest-py | modestpy/estim/ga/individual.py | 1 | 6300 | """
Copyright (c) 2017, University of Southern Denmark
All rights reserved.
This code is licensed under BSD 2-clause license.
See LICENSE file in the project root for license terms.
"""
import logging
import random
import pandas as pd
import numpy as np
import copy
from modestpy.estim.error import calc_err
class Individual(object):
def __init__(self, est_objects, population, genes=None,
use_init_guess=False, ftype='NRMSE'):
"""
Individual can be initialized using `genes` OR initial guess
in `est_objects` (genes are inferred from parameters and vice versa).
Otherwise, random genes are assumed.
:param est_objects: List of EstPar objects with estimated parameters
:type est_objects: list(EstPar)
:param Population population: Population instance
:param genes: Genes (can be also inferred from `parameters`)
:type genes: dict(str: float)
:param bool use_init_guess: If True, use initial guess from
`est_objects`
:param str ftype: Cost function type, 'RMSE' or 'NRMSE'
"""
self.logger = logging.getLogger(type(self).__name__)
# Reference to the population object
self.population = population
# Assign variables shared across the population
self.ideal = population.ideal
self.model = population.model
# Cost function type
self.ftype = ftype
# Deep copy EstPar instances to avoid sharing between individuals
self.est_par_objects = copy.deepcopy(est_objects)
# Generate genes
if not genes and not use_init_guess:
# Generate random genes
est_names = Individual._get_names(self.est_par_objects)
self.genes = Individual._random_genes(est_names)
elif genes and not use_init_guess:
# Use provided genes
self.genes = copy.deepcopy(genes)
elif use_init_guess and not genes:
# Infer genes from parameters
self.genes = dict()
for p in self.est_par_objects:
self.genes[p.name] = (p.value - p.lo) / (p.hi - p.lo)
assert self.genes[p.name] >= 0. and self.genes[p.name] <= 1., \
'Initial guess outside the bounds'
else:
msg = 'Either genes or parameters have to be None'
self.logger.error(msg)
raise ValueError(msg)
# Update parameters
self._update_parameters()
# Individual result
self.result = None
self.error = None
# Main methods ------------------------------
def calculate(self):
# Just in case, individual result and error
# are cleared before simulation
self.reset()
# Important to set estimated parameters just before simulation,
# because all individuals share the same model instance
self.model.set_param(self.est_par_df)
# Simulation
self.result = self.model.simulate()
# Make sure the returned result is not empty
assert self.result.empty is False, \
'Empty result returned from simulation... (?)'
# Calculate error
self.logger.debug("Calculating error ({}) in individual {}"
.format(self.ftype, self.genes))
self.error = calc_err(self.result, self.ideal, ftype=self.ftype)
def reset(self):
self.result = None
self.error = None
self.est_par_objects = copy.deepcopy(self.est_par_objects)
def set_gene(self, name, value):
self.genes[name] = value
self._update_parameters()
def get_gene(self, name):
return self.genes[name]
def get_sorted_gene_names(self):
return sorted(self.genes.keys())
def get_estimates(self, as_dict=False):
"""
        :param as_dict: boolean (True to get dictionary instead of DataFrame)
:return: DataFrame with estimated parameters
"""
df = pd.DataFrame()
for par in self.est_par_objects:
df[par.name] = np.array([par.value])
if as_dict:
return df.to_dict()
else:
return df
def get_estimates_and_error(self):
estimates = self.get_estimates()
estimates['_error_'] = self.error['tot']
return estimates
def get_clone(self):
clone = Individual(self.est_par_objects, self.population,
self.genes, ftype=self.ftype)
return clone
# Private methods ---------------------------
def _update_parameters(self):
# Calculate parameter values
self.est_par_objects = self._calc_parameters(self.genes)
# Convert estimated parameters to dataframe
self.est_par_df = Individual._est_pars_2_df(self.est_par_objects)
@staticmethod
def _est_pars_2_df(est_pars):
df = pd.DataFrame()
for p in est_pars:
df[p.name] = np.array([p.value])
return df
def _calc_parameters(self, genes):
"""
Calculates parameters based on genes and limits.
:return: None
"""
for par in self.est_par_objects:
gene = genes[par.name]
par.value = par.lo + gene * (par.hi - par.lo)
return self.est_par_objects
@staticmethod
def _random_genes(par_names):
"""
Generates random genes.
:return: dict(str: float)
"""
genes = dict()
for par in par_names:
g = 0
while g == 0: # Because random.random() can return 0
g = random.random()
genes[par] = g
return genes
@staticmethod
def _get_names(est_params):
names = list()
for par in est_params:
names.append(par.name)
return names
# Overriden methods --------------------------
def __str__(self):
s = 'Individual ('
for par in self.est_par_objects:
s += par.name + '={0:.3f}'.format(par.value)
s += ', '
# Delete trailing comma
s = s[:-2]
s += '), err='
if self.error:
s += '{:.4f} '.format(self.error['tot'])
else:
s += 'None'
return s
| bsd-2-clause |
PrashntS/scikit-learn | examples/applications/plot_model_complexity_influence.py | 323 | 6372 | """
==========================
Model Complexity Influence
==========================
Demonstrate how model complexity influences both prediction accuracy and
computational performance.
The dataset is the Boston Housing dataset (resp. 20 Newsgroups) for
regression (resp. classification).
For each class of models we make the model complexity vary through the choice
of relevant model parameters and measure the influence on both computational
performance (latency) and predictive power (MSE or Hamming Loss).
"""
print(__doc__)
# Author: Eustache Diemert <eustache@diemert.fr>
# License: BSD 3 clause
import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.parasite_axes import host_subplot
from mpl_toolkits.axisartist.axislines import Axes
from scipy.sparse.csr import csr_matrix
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn.metrics import mean_squared_error
from sklearn.svm.classes import NuSVR
from sklearn.ensemble.gradient_boosting import GradientBoostingRegressor
from sklearn.linear_model.stochastic_gradient import SGDClassifier
from sklearn.metrics import hamming_loss
###############################################################################
# Routines
# initialize random generator
np.random.seed(0)
def generate_data(case, sparse=False):
"""Generate regression/classification data."""
bunch = None
if case == 'regression':
bunch = datasets.load_boston()
elif case == 'classification':
bunch = datasets.fetch_20newsgroups_vectorized(subset='all')
X, y = shuffle(bunch.data, bunch.target)
offset = int(X.shape[0] * 0.8)
X_train, y_train = X[:offset], y[:offset]
X_test, y_test = X[offset:], y[offset:]
if sparse:
X_train = csr_matrix(X_train)
X_test = csr_matrix(X_test)
else:
X_train = np.array(X_train)
X_test = np.array(X_test)
y_test = np.array(y_test)
y_train = np.array(y_train)
data = {'X_train': X_train, 'X_test': X_test, 'y_train': y_train,
'y_test': y_test}
return data
def benchmark_influence(conf):
"""
Benchmark influence of :changing_param: on both MSE and latency.
"""
prediction_times = []
prediction_powers = []
complexities = []
for param_value in conf['changing_param_values']:
conf['tuned_params'][conf['changing_param']] = param_value
estimator = conf['estimator'](**conf['tuned_params'])
print("Benchmarking %s" % estimator)
estimator.fit(conf['data']['X_train'], conf['data']['y_train'])
conf['postfit_hook'](estimator)
complexity = conf['complexity_computer'](estimator)
complexities.append(complexity)
start_time = time.time()
for _ in range(conf['n_samples']):
y_pred = estimator.predict(conf['data']['X_test'])
elapsed_time = (time.time() - start_time) / float(conf['n_samples'])
prediction_times.append(elapsed_time)
pred_score = conf['prediction_performance_computer'](
conf['data']['y_test'], y_pred)
prediction_powers.append(pred_score)
print("Complexity: %d | %s: %.4f | Pred. Time: %fs\n" % (
complexity, conf['prediction_performance_label'], pred_score,
elapsed_time))
return prediction_powers, prediction_times, complexities
def plot_influence(conf, mse_values, prediction_times, complexities):
"""
Plot influence of model complexity on both accuracy and latency.
"""
plt.figure(figsize=(12, 6))
host = host_subplot(111, axes_class=Axes)
plt.subplots_adjust(right=0.75)
par1 = host.twinx()
host.set_xlabel('Model Complexity (%s)' % conf['complexity_label'])
y1_label = conf['prediction_performance_label']
y2_label = "Time (s)"
host.set_ylabel(y1_label)
par1.set_ylabel(y2_label)
p1, = host.plot(complexities, mse_values, 'b-', label="prediction error")
p2, = par1.plot(complexities, prediction_times, 'r-',
label="latency")
host.legend(loc='upper right')
host.axis["left"].label.set_color(p1.get_color())
par1.axis["right"].label.set_color(p2.get_color())
plt.title('Influence of Model Complexity - %s' % conf['estimator'].__name__)
plt.show()
def _count_nonzero_coefficients(estimator):
a = estimator.coef_.toarray()
return np.count_nonzero(a)
###############################################################################
# main code
regression_data = generate_data('regression')
classification_data = generate_data('classification', sparse=True)
configurations = [
{'estimator': SGDClassifier,
'tuned_params': {'penalty': 'elasticnet', 'alpha': 0.001, 'loss':
'modified_huber', 'fit_intercept': True},
'changing_param': 'l1_ratio',
'changing_param_values': [0.25, 0.5, 0.75, 0.9],
'complexity_label': 'non_zero coefficients',
'complexity_computer': _count_nonzero_coefficients,
'prediction_performance_computer': hamming_loss,
'prediction_performance_label': 'Hamming Loss (Misclassification Ratio)',
'postfit_hook': lambda x: x.sparsify(),
'data': classification_data,
'n_samples': 30},
{'estimator': NuSVR,
'tuned_params': {'C': 1e3, 'gamma': 2 ** -15},
'changing_param': 'nu',
'changing_param_values': [0.1, 0.25, 0.5, 0.75, 0.9],
'complexity_label': 'n_support_vectors',
'complexity_computer': lambda x: len(x.support_vectors_),
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
{'estimator': GradientBoostingRegressor,
'tuned_params': {'loss': 'ls'},
'changing_param': 'n_estimators',
'changing_param_values': [10, 50, 100, 200, 500],
'complexity_label': 'n_trees',
'complexity_computer': lambda x: x.n_estimators,
'data': regression_data,
'postfit_hook': lambda x: x,
'prediction_performance_computer': mean_squared_error,
'prediction_performance_label': 'MSE',
'n_samples': 30},
]
for conf in configurations:
prediction_performances, prediction_times, complexities = \
benchmark_influence(conf)
plot_influence(conf, prediction_performances, prediction_times,
complexities)
| bsd-3-clause |
evgchz/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_01_language_train_model.py | 254 | 2253 | """Build a language detector model
The goal of this exercise is to train a linear classifier on text features
that represent sequences of up to 3 consecutive characters so as to be
recognize natural languages by using the frequencies of short character
sequences as 'fingerprints'.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.linear_model import Perceptron
from sklearn.pipeline import Pipeline
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
# The training data folder must be passed as first argument
languages_data_folder = sys.argv[1]
dataset = load_files(languages_data_folder)
# Split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.5)
# TASK: Build a vectorizer that splits strings into sequences of 1 to 3
# characters instead of word tokens
vectorizer = TfidfVectorizer(ngram_range=(1, 3), analyzer='char',
use_idf=False)
# TASK: Build a vectorizer / classifier pipeline using the previous analyzer
# the pipeline instance should be stored in a variable named clf
clf = Pipeline([
('vec', vectorizer),
('clf', Perceptron()),
])
# TASK: Fit the pipeline on the training set
clf.fit(docs_train, y_train)
# TASK: Predict the outcome on the testing set in a variable named y_predicted
y_predicted = clf.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
#import pylab as pl
#pl.matshow(cm, cmap=pl.cm.jet)
#pl.show()
# Predict the result on some short new sentences:
sentences = [
u'This is a language detection test.',
u'Ceci est un test de d\xe9tection de la langue.',
u'Dies ist ein Test, um die Sprache zu erkennen.',
]
predicted = clf.predict(sentences)
for s, p in zip(sentences, predicted):
print(u'The language of "%s" is "%s"' % (s, dataset.target_names[p]))
| bsd-3-clause |
architecture-building-systems/CityEnergyAnalyst | cea/technologies/thermal_network/simplified_thermal_network.py | 2 | 29336 |
import math
import time
import geopandas as gpd
import numpy as np
import pandas as pd
import wntr
import cea.config
import cea.inputlocator
import cea.technologies.substation as substation
from cea.constants import P_WATER_KGPERM3, FT_WATER_TO_PA, FT_TO_M, M_WATER_TO_PA, HEAT_CAPACITY_OF_WATER_JPERKGK, SHAPEFILE_TOLERANCE
from cea.optimization.constants import PUMP_ETA
from cea.optimization.preprocessing.preprocessing_main import get_building_names_with_load
from cea.technologies.thermal_network.thermal_network_loss import calc_temperature_out_per_pipe
from cea.resources import geothermal
from cea.technologies.constants import NETWORK_DEPTH
from cea.utilities.epwreader import epw_reader
__author__ = "Jimeno A. Fonseca"
__copyright__ = "Copyright 2019, Architecture and Building Systems - ETH Zurich"
__credits__ = ["Jimeno A. Fonseca"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "Daren Thomas"
__email__ = "cea@arch.ethz.ch"
__status__ = "Production"
def calculate_ground_temperature(locator):
"""
calculate ground temperatures.
:param locator:
:return: list of ground temperatures, one for each hour of the year
:rtype: list[np.float64]
"""
weather_file = locator.get_weather_file()
T_ambient_C = epw_reader(weather_file)['drybulb_C']
network_depth_m = NETWORK_DEPTH # [m]
T_ground_K = geothermal.calc_ground_temperature(locator, T_ambient_C.values, network_depth_m)
return T_ground_K
def extract_network_from_shapefile(edge_shapefile_df, node_shapefile_df):
"""
Extracts network data into DataFrames for pipes and nodes in the network
:param edge_shapefile_df: DataFrame containing all data imported from the edge shapefile
:param node_shapefile_df: DataFrame containing all data imported from the node shapefile
:type edge_shapefile_df: DataFrame
:type node_shapefile_df: DataFrame
:return node_df: DataFrame containing all nodes and their corresponding coordinates
:return edge_df: list of edges and their corresponding lengths and start and end nodes
:rtype node_df: DataFrame
:rtype edge_df: DataFrame
"""
# create node dictionary with plant and consumer nodes
node_dict = {}
node_shapefile_df.set_index("Name", inplace=True)
node_shapefile_df = node_shapefile_df.astype('object')
node_shapefile_df['coordinates'] = node_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort node_df by index number
node_sorted_index = node_shapefile_df.index.to_series().str.split('NODE', expand=True)[1].apply(int).sort_values(
ascending=True)
node_shapefile_df = node_shapefile_df.reindex(index=node_sorted_index.index)
for node, row in node_shapefile_df.iterrows():
coord_node = row['geometry'].coords[0]
coord_node_round = (round(coord_node[0], SHAPEFILE_TOLERANCE), round(coord_node[1], SHAPEFILE_TOLERANCE))
node_dict[coord_node_round] = node
# create edge dictionary with pipe lengths and start and end nodes
# complete node dictionary with missing nodes (i.e., joints)
edge_shapefile_df.set_index("Name", inplace=True)
edge_shapefile_df = edge_shapefile_df.astype('object')
edge_shapefile_df['coordinates'] = edge_shapefile_df['geometry'].apply(lambda x: x.coords[0])
# sort edge_df by index number
edge_sorted_index = edge_shapefile_df.index.to_series().str.split('PIPE', expand=True)[1].apply(int).sort_values(
ascending=True)
edge_shapefile_df = edge_shapefile_df.reindex(index=edge_sorted_index.index)
# assign edge properties
edge_shapefile_df['start node'] = ''
edge_shapefile_df['end node'] = ''
for pipe, row in edge_shapefile_df.iterrows():
# get the length of the pipe and add to dataframe
edge_coords = row['geometry'].coords
edge_shapefile_df.loc[pipe, 'length_m'] = row['geometry'].length
start_node = (round(edge_coords[0][0], SHAPEFILE_TOLERANCE), round(edge_coords[0][1], SHAPEFILE_TOLERANCE))
end_node = (round(edge_coords[1][0], SHAPEFILE_TOLERANCE), round(edge_coords[1][1], SHAPEFILE_TOLERANCE))
if start_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'start node'] = node_dict[start_node]
else:
print(f"The start node of {pipe} has no match in node_dict, check precision of the coordinates.")
if end_node in node_dict.keys():
edge_shapefile_df.loc[pipe, 'end node'] = node_dict[end_node]
else:
print(f"The end node of {pipe} has no match in node_dict, check precision of the coordinates.")
return node_shapefile_df, edge_shapefile_df
def get_thermal_network_from_shapefile(locator, network_type, network_name):
"""
This function reads the existing node and pipe network from a shapefile and produces an edge-node incidence matrix
(as defined by Oppelt et al., 2016) as well as the edge properties (length, start node, and end node) and node
coordinates.
"""
# import shapefiles containing the network's edges and nodes
network_edges_df = gpd.read_file(locator.get_network_layout_edges_shapefile(network_type, network_name))
network_nodes_df = gpd.read_file(locator.get_network_layout_nodes_shapefile(network_type, network_name))
# check duplicated NODE/PIPE IDs
duplicated_nodes = network_nodes_df[network_nodes_df.Name.duplicated(keep=False)]
duplicated_edges = network_edges_df[network_edges_df.Name.duplicated(keep=False)]
if duplicated_nodes.size > 0:
raise ValueError('There are duplicated NODE IDs:', duplicated_nodes)
if duplicated_edges.size > 0:
        raise ValueError('There are duplicated PIPE IDs:', duplicated_edges)
# get node and pipe information
node_df, edge_df = extract_network_from_shapefile(network_edges_df, network_nodes_df)
return edge_df, node_df
def calc_max_diameter(volume_flow_m3s, pipe_catalog, velocity_ms, peak_load_percentage):
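    # Pipe sizing sketch: the design flow is peak_load_percentage of the peak volume flow;
    # continuity (A = Q/v) gives D = sqrt(4*Q / (pi * v)), and the catalogue entry with the
    # closest internal diameter then fixes the nominal diameter and wall/insulation sizes.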
volume_flow_m3s_corrected_to_design = volume_flow_m3s * peak_load_percentage / 100
diameter_m = math.sqrt((volume_flow_m3s_corrected_to_design / velocity_ms) * (4 / math.pi))
selection_of_catalog = pipe_catalog.loc[(pipe_catalog['D_int_m'] - diameter_m).abs().argsort()[:1]]
D_int_m = selection_of_catalog['D_int_m'].values[0]
Pipe_DN = selection_of_catalog['Pipe_DN'].values[0]
D_ext_m = selection_of_catalog['D_ext_m'].values[0]
D_ins_m = selection_of_catalog['D_ins_m'].values[0]
return Pipe_DN, D_ext_m, D_int_m, D_ins_m
def calc_head_loss_m(diameter_m, max_volume_flow_rates_m3s, coefficient_friction, length_m):
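    # Hazen-Williams head loss in SI units: hf/L ~ 10.67 * Q**1.852 / (C**1.852 * D**4.8704),
    # with Q in m3/s, D in m and C the Hazen-Williams roughness coefficient; multiplying by
    # the pipe length gives the head loss in metres of water column.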
hf_L = (10.67 / (coefficient_friction ** 1.85)) * (max_volume_flow_rates_m3s ** 1.852) / (diameter_m ** 4.8704)
head_loss_m = hf_L * length_m
return head_loss_m
def calc_linear_thermal_loss_coefficient(diamter_ext_m, diamter_int_m, diameter_insulation_m):
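    # Linear heat-loss coefficient of the pipe: the steel wall and the insulation act as
    # thermal resistances in series per unit length, R = ln(r_out/r_in)/k_pipe +
    # ln(r_ins/r_out)/k_ins, and K = 2*pi / R in W/(m*K).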
r_out_m = diamter_ext_m / 2
r_in_m = diamter_int_m / 2
r_s_m = diameter_insulation_m / 2
k_pipe_WpermK = 58.7 # steel pipe
    k_ins_WpermK = 0.059 # calcium silicate insulation
resistance_mKperW = ((math.log(r_out_m / r_in_m) / k_pipe_WpermK) + (math.log(r_s_m / r_out_m) / k_ins_WpermK))
K_WperKm = 2 * math.pi / resistance_mKperW
return K_WperKm
def calc_thermal_loss_per_pipe(T_in_K, m_kgpers, T_ground_K, k_kWperK):
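    # T_out follows from the exponential temperature decay along the pipe
    # (calc_temperature_out_per_pipe); the loss over the hourly timestep is
    # m_dot * cp * (T_in - T_out) in W, divided by 1000 to give kW (i.e. kWh per hour).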
T_out_K = calc_temperature_out_per_pipe(T_in_K, m_kgpers, k_kWperK, T_ground_K)
DT = T_in_K - T_out_K
Q_loss_kWh = DT * m_kgpers * HEAT_CAPACITY_OF_WATER_JPERKGK / 1000
return Q_loss_kWh
def thermal_network_simplified(locator, config, network_name):
# local variables
network_type = config.thermal_network.network_type
min_head_substation_kPa = config.thermal_network.min_head_substation
thermal_transfer_unit_design_head_m = min_head_substation_kPa * 1000 / M_WATER_TO_PA
coefficient_friction_hazen_williams = config.thermal_network.hw_friction_coefficient
velocity_ms = config.thermal_network.peak_load_velocity
fraction_equivalent_length = config.thermal_network.equivalent_length_factor
peak_load_percentage = config.thermal_network.peak_load_percentage
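    # Overall workflow: (1) run EPANET once with default pipe diameters to get peak flows and
    # size the pipes from the catalogue, (2) re-run with the sized diameters to get head losses
    # and mass flows, (3) apply the resulting head pattern to the plant reservoir and re-run for
    # the final supply-side results used in the post-processing below.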
# GET INFORMATION ABOUT THE NETWORK
edge_df, node_df = get_thermal_network_from_shapefile(locator, network_type, network_name)
# GET INFORMATION ABOUT THE DEMAND OF BUILDINGS AND CONNECT TO THE NODE INFO
# calculate substations for all buildings
# local variables
total_demand = pd.read_csv(locator.get_total_demand())
volume_flow_m3pers_building = pd.DataFrame()
T_sup_K_building = pd.DataFrame()
T_re_K_building = pd.DataFrame()
Q_demand_kWh_building = pd.DataFrame()
if network_type == "DH":
buildings_name_with_heating = get_building_names_with_load(total_demand, load_name='QH_sys_MWhyr')
buildings_name_with_space_heating = get_building_names_with_load(total_demand, load_name='Qhs_sys_MWhyr')
DHN_barcode = "0"
if (buildings_name_with_heating != [] and buildings_name_with_space_heating != []):
building_names = [building for building in buildings_name_with_heating if building in
node_df.Building.values]
substation.substation_main_heating(locator, total_demand, building_names, DHN_barcode=DHN_barcode)
else:
raise Exception('problem here')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DH", DHN_barcode))
volume_flow_m3pers_building[building_name] = substation_results["mdot_DH_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results["T_supply_DH_result_K"]
T_re_K_building[building_name] = np.where(substation_results["T_return_DH_result_K"] >273.15,
substation_results["T_return_DH_result_K"], np.nan)
Q_demand_kWh_building[building_name] = (substation_results["Q_heating_W"] + substation_results[
"Q_dhw_W"]) / 1000
if network_type == "DC":
buildings_name_with_cooling = get_building_names_with_load(total_demand, load_name='QC_sys_MWhyr')
DCN_barcode = "0"
if buildings_name_with_cooling != []:
building_names = [building for building in buildings_name_with_cooling if building in
node_df.Building.values]
substation.substation_main_cooling(locator, total_demand, building_names, DCN_barcode=DCN_barcode)
else:
raise Exception('problem here')
for building_name in building_names:
substation_results = pd.read_csv(
locator.get_optimization_substations_results_file(building_name, "DC", DCN_barcode))
volume_flow_m3pers_building[building_name] = substation_results[
"mdot_space_cooling_data_center_and_refrigeration_result_kgpers"] / P_WATER_KGPERM3
T_sup_K_building[building_name] = substation_results[
"T_supply_DC_space_cooling_data_center_and_refrigeration_result_K"]
T_re_K_building[building_name] = substation_results[
"T_return_DC_space_cooling_data_center_and_refrigeration_result_K"]
Q_demand_kWh_building[building_name] = substation_results[
"Q_space_cooling_data_center_and_refrigeration_W"] / 1000
import cea.utilities
with cea.utilities.pushd(locator.get_thermal_network_folder()):
# Create a water network model
wn = wntr.network.WaterNetworkModel()
# add loads
building_base_demand_m3s = {}
for building in volume_flow_m3pers_building.keys():
building_base_demand_m3s[building] = volume_flow_m3pers_building[building].max()
pattern_demand = (volume_flow_m3pers_building[building].values / building_base_demand_m3s[building]).tolist()
wn.add_pattern(building, pattern_demand)
# add nodes
consumer_nodes = []
building_nodes_pairs = {}
building_nodes_pairs_inversed = {}
for node in node_df.iterrows():
if node[1]["Type"] == "CONSUMER":
demand_pattern = node[1]['Building']
base_demand_m3s = building_base_demand_m3s[demand_pattern]
consumer_nodes.append(node[0])
building_nodes_pairs[node[0]] = demand_pattern
building_nodes_pairs_inversed[demand_pattern] = node[0]
wn.add_junction(node[0],
base_demand=base_demand_m3s,
demand_pattern=demand_pattern,
elevation=thermal_transfer_unit_design_head_m,
coordinates=node[1]["coordinates"])
elif node[1]["Type"] == "PLANT":
base_head = int(thermal_transfer_unit_design_head_m*1.2)
start_node = node[0]
name_node_plant = start_node
wn.add_reservoir(start_node,
base_head=base_head,
coordinates=node[1]["coordinates"])
else:
wn.add_junction(node[0],
elevation=0,
coordinates=node[1]["coordinates"])
# add pipes
for edge in edge_df.iterrows():
length_m = edge[1]["length_m"]
edge_name = edge[0]
wn.add_pipe(edge_name, edge[1]["start node"],
edge[1]["end node"],
length=length_m * (1 + fraction_equivalent_length),
roughness=coefficient_friction_hazen_williams,
minor_loss=0.0,
status='OPEN')
# add options
wn.options.time.duration = 8759 * 3600 # this indicates epanet to do one year simulation
wn.options.time.hydraulic_timestep = 60 * 60
wn.options.time.pattern_timestep = 60 * 60
wn.options.solver.accuracy = 0.01
wn.options.solver.trials = 100
# 1st ITERATION GET MASS FLOWS AND CALCULATE DIAMETER
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
max_volume_flow_rates_m3s = results.link['flowrate'].abs().max()
pipe_names = max_volume_flow_rates_m3s.index.values
pipe_catalog = pd.read_excel(locator.get_database_distribution_systems(), sheet_name='THERMAL_GRID')
Pipe_DN, D_ext_m, D_int_m, D_ins_m = zip(
*[calc_max_diameter(flow, pipe_catalog, velocity_ms=velocity_ms, peak_load_percentage=peak_load_percentage) for
flow in max_volume_flow_rates_m3s])
pipe_dn = pd.Series(Pipe_DN, pipe_names)
diameter_int_m = pd.Series(D_int_m, pipe_names)
diameter_ext_m = pd.Series(D_ext_m, pipe_names)
diameter_ins_m = pd.Series(D_ins_m, pipe_names)
# 2nd ITERATION GET PRESSURE POINTS AND MASSFLOWS FOR SIZING PUMPING NEEDS - this could be for all the year
# modify diameter and run simulations
edge_df['Pipe_DN'] = pipe_dn
edge_df['D_int_m'] = D_int_m
for edge in edge_df.iterrows():
edge_name = edge[0]
pipe = wn.get_link(edge_name)
pipe.diameter = diameter_int_m[edge_name]
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# 3rd ITERATION GET FINAL UTILIZATION OF THE GRID (SUPPLY SIDE)
# get accumulated head loss per hour
unitary_head_ftperkft = results.link['headloss'].abs()
unitary_head_mperm = unitary_head_ftperkft * FT_TO_M / (FT_TO_M * 1000)
head_loss_m = unitary_head_mperm.copy()
for column in head_loss_m.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_m[column] = head_loss_m[column] * length_m
reservoir_head_loss_m = head_loss_m.sum(axis=1) + thermal_transfer_unit_design_head_m*1.2 # fixme: only one thermal_transfer_unit_design_head_m from one substation?
# apply this pattern to the reservoir and get results
base_head = reservoir_head_loss_m.max()
pattern_head_m = (reservoir_head_loss_m.values / base_head).tolist()
wn.add_pattern('reservoir', pattern_head_m)
reservoir = wn.get_node(name_node_plant)
reservoir.head_timeseries.base_value = int(base_head)
reservoir.head_timeseries._pattern = 'reservoir'
sim = wntr.sim.EpanetSimulator(wn)
results = sim.run_sim()
# POSTPROCESSING
# $ POSTPROCESSING - PRESSURE/HEAD LOSSES PER PIPE PER HOUR OF THE YEAR
# at the pipes
unitary_head_loss_supply_network_ftperkft = results.link['headloss'].abs()
linear_pressure_loss_Paperm = unitary_head_loss_supply_network_ftperkft * FT_WATER_TO_PA / (FT_TO_M * 1000)
head_loss_supply_network_Pa = linear_pressure_loss_Paperm.copy()
for column in head_loss_supply_network_Pa.columns.values:
length_m = edge_df.loc[column]['length_m']
head_loss_supply_network_Pa[column] = head_loss_supply_network_Pa[column] * length_m
head_loss_return_network_Pa = head_loss_supply_network_Pa.copy(0)
# at the substations
head_loss_substations_ft = results.node['head'][consumer_nodes].abs()
head_loss_substations_Pa = head_loss_substations_ft * FT_WATER_TO_PA
    # POSTPROCESSING MASS FLOW RATES
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
# $ POSTPROCESSING - PRESSURE LOSSES ACCUMULATED PER HOUR OF THE YEAR (TIMES 2 to account for return)
accumulated_head_loss_supply_Pa = head_loss_supply_network_Pa.sum(axis=1)
accumulated_head_loss_return_Pa = head_loss_return_network_Pa.sum(axis=1)
accumulated_head_loss_substations_Pa = head_loss_substations_Pa.sum(axis=1)
accumulated_head_loss_total_Pa = accumulated_head_loss_supply_Pa + accumulated_head_loss_return_Pa + accumulated_head_loss_substations_Pa
# $ POSTPROCESSING - THERMAL LOSSES PER PIPE PER HOUR OF THE YEAR (SUPPLY)
# calculate the thermal characteristics of the grid
temperature_of_the_ground_K = calculate_ground_temperature(locator)
thermal_coeffcient_WperKm = pd.Series(
np.vectorize(calc_linear_thermal_loss_coefficient)(diameter_ext_m, diameter_int_m, diameter_ins_m), pipe_names)
average_temperature_supply_K = T_sup_K_building.mean(axis=1)
thermal_losses_supply_kWh = results.link['headloss'].copy()
thermal_losses_supply_kWh.reset_index(inplace=True, drop=True)
thermal_losses_supply_Wperm = thermal_losses_supply_kWh.copy()
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
k_WperKm_pipe = thermal_coeffcient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_supply_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_supply_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
thermal_losses_supply_Wperm[pipe] = (thermal_losses_supply_kWh[pipe] / length_m) * 1000
# return pipes
average_temperature_return_K = T_re_K_building.mean(axis=1)
thermal_losses_return_kWh = results.link['headloss'].copy()
thermal_losses_return_kWh.reset_index(inplace=True, drop=True)
for pipe in pipe_names:
length_m = edge_df.loc[pipe]['length_m']
massflow_kgs = massflow_supply_kgs[pipe]
k_WperKm_pipe = thermal_coeffcient_WperKm[pipe]
k_kWperK = k_WperKm_pipe * length_m / 1000
thermal_losses_return_kWh[pipe] = np.vectorize(calc_thermal_loss_per_pipe)(average_temperature_return_K.values,
massflow_kgs.values,
temperature_of_the_ground_K,
k_kWperK,
)
# WRITE TO DISK
# LINEAR PRESSURE LOSSES (EDGES)
linear_pressure_loss_Paperm.to_csv(locator.get_network_linear_pressure_drop_edges(network_type, network_name),
index=False)
# MASS_FLOW_RATE (EDGES)
flow_rate_supply_m3s = results.link['flowrate'].abs()
massflow_supply_kgs = flow_rate_supply_m3s * P_WATER_KGPERM3
massflow_supply_kgs.to_csv(locator.get_thermal_network_layout_massflow_edges_file(network_type, network_name),
index=False)
# VELOCITY (EDGES)
velocity_edges_ms = results.link['velocity'].abs()
velocity_edges_ms.to_csv(locator.get_thermal_network_velocity_edges_file(network_type, network_name),
index=False)
# PRESSURE LOSSES (NODES)
pressure_at_nodes_ft = results.node['pressure'].abs()
pressure_at_nodes_Pa = pressure_at_nodes_ft * FT_TO_M * M_WATER_TO_PA
pressure_at_nodes_Pa.to_csv(locator.get_network_pressure_at_nodes(network_type, network_name), index=False)
# MASS_FLOW_RATE (NODES)
# $ POSTPROCESSING - MASSFLOWRATES PER NODE PER HOUR OF THE YEAR
flow_rate_supply_nodes_m3s = results.node['demand'].abs()
massflow_supply_nodes_kgs = flow_rate_supply_nodes_m3s * P_WATER_KGPERM3
massflow_supply_nodes_kgs.to_csv(locator.get_thermal_network_layout_massflow_nodes_file(network_type, network_name),
index=False)
# thermal demand per building (no losses in the network or substations)
Q_demand_Wh_building = Q_demand_kWh_building * 1000
Q_demand_Wh_building.to_csv(locator.get_thermal_demand_csv_file(network_type, network_name), index=False)
# pressure losses total
# $ POSTPROCESSING - PUMPING NEEDS PER HOUR OF THE YEAR (TIMES 2 to account for return)
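    # pumping power [kW] = pressure loss [Pa] * volumetric flow [m3/h] / 3.6e6, divided by the pump efficiency PUMP_ETA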
flow_rate_substations_m3s = results.node['demand'][consumer_nodes].abs()
head_loss_supply_kWperm = (linear_pressure_loss_Paperm * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kWperm = head_loss_supply_kWperm.copy()
pressure_loss_supply_edge_kW = (head_loss_supply_network_Pa * (flow_rate_supply_m3s * 3600)) / (3.6E6 * PUMP_ETA)
head_loss_return_kW = pressure_loss_supply_edge_kW.copy()
head_loss_substations_kW = (head_loss_substations_Pa * (flow_rate_substations_m3s * 3600)) / (3.6E6 * PUMP_ETA)
accumulated_head_loss_supply_kW = pressure_loss_supply_edge_kW.sum(axis=1)
accumulated_head_loss_return_kW = head_loss_return_kW.sum(axis=1)
accumulated_head_loss_substations_kW = head_loss_substations_kW.sum(axis=1)
accumulated_head_loss_total_kW = accumulated_head_loss_supply_kW + \
accumulated_head_loss_return_kW + \
accumulated_head_loss_substations_kW
head_loss_system_Pa = pd.DataFrame({"pressure_loss_supply_Pa": accumulated_head_loss_supply_Pa,
"pressure_loss_return_Pa": accumulated_head_loss_return_Pa,
"pressure_loss_substations_Pa": accumulated_head_loss_substations_Pa,
"pressure_loss_total_Pa": accumulated_head_loss_total_Pa})
head_loss_system_Pa.to_csv(locator.get_network_total_pressure_drop_file(network_type, network_name),
index=False)
# $ POSTPROCESSING - PLANT HEAT REQUIREMENT
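    # plant heat requirement = building demand + network thermal losses (supply losses doubled to approximate
    # the return line) minus the accumulated pumping losses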
plant_load_kWh = thermal_losses_supply_kWh.sum(axis=1) * 2 + Q_demand_kWh_building.sum(
axis=1) - accumulated_head_loss_total_kW.values
plant_load_kWh.to_csv(locator.get_thermal_network_plant_heat_requirement_file(network_type, network_name),
header=['thermal_load_kW'], index=False)
    # pressure losses per pipe (edges)
pressure_loss_supply_edge_kW.to_csv(
locator.get_thermal_network_pressure_losses_edges_file(network_type, network_name), index=False)
# pressure losses per substation
head_loss_substations_kW = head_loss_substations_kW.rename(columns=building_nodes_pairs)
head_loss_substations_kW.to_csv(locator.get_thermal_network_substation_ploss_file(network_type, network_name),
index=False)
    # total pumping energy requirements
pumping_energy_system_kWh = pd.DataFrame({"pressure_loss_supply_kW": accumulated_head_loss_supply_kW,
"pressure_loss_return_kW": accumulated_head_loss_return_kW,
"pressure_loss_substations_kW": accumulated_head_loss_substations_kW,
"pressure_loss_total_kW": accumulated_head_loss_total_kW})
pumping_energy_system_kWh.to_csv(
locator.get_network_energy_pumping_requirements_file(network_type, network_name), index=False)
    # plant supply and return temperatures
temperatures_plant_C = pd.DataFrame({"temperature_supply_K": average_temperature_supply_K,
"temperature_return_K": average_temperature_return_K})
temperatures_plant_C.to_csv(locator.get_network_temperature_plant(network_type, network_name), index=False)
# thermal losses
thermal_losses_supply_kWh.to_csv(locator.get_network_thermal_loss_edges_file(network_type, network_name),
index=False)
thermal_losses_supply_Wperm.to_csv(locator.get_network_linear_thermal_loss_edges_file(network_type, network_name),
index=False)
# thermal losses total
accumulated_thermal_losses_supply_kWh = thermal_losses_supply_kWh.sum(axis=1)
accumulated_thermal_losses_return_kWh = thermal_losses_return_kWh.sum(axis=1)
accumulated_thermal_loss_total_kWh = accumulated_thermal_losses_supply_kWh + accumulated_thermal_losses_return_kWh
thermal_losses_total_kWh = pd.DataFrame({"thermal_loss_supply_kW": accumulated_thermal_losses_supply_kWh,
"thermal_loss_return_kW": accumulated_thermal_losses_return_kWh,
"thermal_loss_total_kW": accumulated_thermal_loss_total_kWh})
thermal_losses_total_kWh.to_csv(locator.get_network_total_thermal_loss_file(network_type, network_name),
index=False)
    # average supply temperature at the substations (assigned to every node)
T_sup_K_nodes = T_sup_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_sup_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_sup_K_nodes[node] = average_year
T_sup_K_nodes.to_csv(locator.get_network_temperature_supply_nodes_file(network_type, network_name),
index=False)
    # average return temperature at the substations (assigned to every node)
T_return_K_nodes = T_re_K_building.rename(columns=building_nodes_pairs_inversed)
average_year = T_return_K_nodes.mean(axis=1)
for node in node_df.index.values:
T_return_K_nodes[node] = average_year
T_return_K_nodes.to_csv(locator.get_network_temperature_return_nodes_file(network_type, network_name),
index=False)
# summary of edges used for the calculation
fields_edges = ['length_m', 'Pipe_DN', 'Type_mat', 'D_int_m']
edge_df[fields_edges].to_csv(locator.get_thermal_network_edge_list_file(network_type, network_name))
fields_nodes = ['Type', 'Building']
node_df[fields_nodes].to_csv(locator.get_thermal_network_node_types_csv_file(network_type, network_name))
    # write the corrected pipe diameters back to the network shapefile
from cea.utilities.dbf import dataframe_to_dbf, dbf_to_dataframe
fields = ['length_m', 'Pipe_DN', 'Type_mat']
edge_df = edge_df[fields]
edge_df['name'] = edge_df.index.values
network_edges_df = dbf_to_dataframe(
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
network_edges_df = network_edges_df.merge(edge_df, left_on='Name', right_on='name', suffixes=('_x', ''))
network_edges_df = network_edges_df.drop(['Pipe_DN_x', 'Type_mat_x', 'name', 'length_m_x'], axis=1)
dataframe_to_dbf(network_edges_df,
locator.get_network_layout_edges_shapefile(network_type, network_name).split('.shp')[0] + '.dbf')
def main(config):
"""
run the whole network summary routine
"""
start = time.time()
locator = cea.inputlocator.InputLocator(scenario=config.scenario)
network_names = config.thermal_network.network_names
if len(network_names) == 0:
network_names = ['']
for network_name in network_names:
thermal_network_simplified(locator, config, network_name)
print("done.")
print(f"total time: {time.time() - start}")
if __name__ == '__main__':
main(cea.config.Configuration())
| mit |
marionleborgne/nupic.research | projects/sequence_prediction/discrete_sequences/plotMultiplePrediction.py | 12 | 3551 | #!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
Plot sequence prediction experiment with multiple possible outcomes
"""
import os
from matplotlib import pyplot
import matplotlib as mpl
import numpy
from plot import computeAccuracy
from plot import plotAccuracy
from plot import readExperiment
mpl.rcParams['pdf.fonttype'] = 42
pyplot.ion()
pyplot.close('all')
if __name__ == '__main__':
experiments = []
for num_prediction in [2, 4]:
experiments.append(os.path.join("tm/results",
"high-order-distributed-random-multiple-predictions",
"num_predictions{:2.1f}".format(num_prediction),
"0.log"))
# for num_prediction in [2, 4]:
# experiments.append(os.path.join("lstm/results",
# "high-order-distributed-random-multiple-predictions",
# "seed0.0num_predictions{:2.1f}".format(num_prediction),
# "0.log"))
for num_prediction in [2, 4]:
experiments.append(os.path.join("lstm/results",
"high-order-basic-random-multiple-predictions",
"seed0.0num_predictions{:2.1f}".format(num_prediction),
"0.log"))
# for num_prediction in [2, 4]:
# experiments.append(os.path.join("elm/results",
# "high-order-basic-random-multiple-predictions",
# "seed0.0num_predictions{:2.1f}".format(num_prediction),
# "0.log"))
for experiment in experiments:
data = readExperiment(experiment)
(accuracy, x) = computeAccuracy(data['predictions'],
data['truths'],
data['iterations'],
resets=data['resets'],
randoms=data['randoms'])
# perturbAt = data['sequenceCounter'][10000]
plotAccuracy((accuracy, x),
data['trains'],
window=200,
type=type,
label='NoiseExperiment',
hideTraining=True,
lineSize=1.0)
# pyplot.xlim([1200, 1750])
pyplot.xlabel('# of elements seen')
pyplot.legend(['HTM: 2 predictions',
'HTM: 4 predictions',
'LSTM: 2 predictions',
'LSTM: 4 predictions'], loc=4)
# pyplot.legend(['LSTM', 'HTM'])
pyplot.savefig('./result/model_performance_multiple_prediction.pdf') | agpl-3.0 |
sergiohgz/incubator-airflow | airflow/www/views.py | 2 | 111267 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import ast
import codecs
import copy
import datetime as dt
import inspect
import itertools
import json
import logging
import math
import os
import traceback
from collections import defaultdict
from datetime import timedelta
from functools import wraps
from textwrap import dedent
import bleach
import markdown
import nvd3
import pendulum
import pkg_resources
import sqlalchemy as sqla
from flask import (
abort, jsonify, redirect, url_for, request, Markup, Response,
current_app, render_template, make_response)
from flask import flash
from flask._compat import PY2
from flask_admin import BaseView, expose, AdminIndexView
from flask_admin.actions import action
from flask_admin.babel import lazy_gettext
from flask_admin.contrib.sqla import ModelView
from flask_admin.form.fields import DateTimeField
from flask_admin.tools import iterdecode
from jinja2 import escape
from jinja2.sandbox import ImmutableSandboxedEnvironment
from past.builtins import basestring, unicode
from pygments import highlight, lexers
from pygments.formatters import HtmlFormatter
from sqlalchemy import or_, desc, and_, union_all
from wtforms import (
Form, SelectField, TextAreaField, PasswordField,
StringField, validators)
import airflow
from airflow import configuration as conf
from airflow import models
from airflow import settings
from airflow.api.common.experimental.mark_tasks import (set_dag_run_state_to_running,
set_dag_run_state_to_success,
set_dag_run_state_to_failed)
from airflow.exceptions import AirflowException
from airflow.models import BaseOperator
from airflow.models import XCom, DagRun
from airflow.operators.subdag_operator import SubDagOperator
from airflow.ti_deps.dep_context import DepContext, QUEUE_DEPS, SCHEDULER_DEPS
from airflow.utils import timezone
from airflow.utils.dates import infer_time_unit, scale_time_units, parse_execution_date
from airflow.utils.db import create_session, provide_session
from airflow.utils.helpers import alchemy_to_dict
from airflow.utils.json import json_ser
from airflow.utils.net import get_hostname
from airflow.utils.state import State
from airflow.utils.timezone import datetime
from airflow.www import utils as wwwutils
from airflow.www.forms import (DateTimeForm, DateTimeWithNumRunsForm,
DateTimeWithNumRunsWithDagRunsForm)
from airflow.www.validators import GreaterEqualThan
QUERY_LIMIT = 100000
CHART_LIMIT = 200000
UTF8_READER = codecs.getreader('utf-8')
dagbag = models.DagBag(settings.DAGS_FOLDER)
login_required = airflow.login.login_required
current_user = airflow.login.current_user
logout_user = airflow.login.logout_user
FILTER_BY_OWNER = False
PAGE_SIZE = conf.getint('webserver', 'page_size')
if conf.getboolean('webserver', 'FILTER_BY_OWNER'):
# filter_by_owner if authentication is enabled and filter_by_owner is true
FILTER_BY_OWNER = not current_app.config['LOGIN_DISABLED']
def dag_link(v, c, m, p):
if m.dag_id is None:
return Markup()
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=dag_id,
execution_date=m.execution_date)
return Markup(
'<a href="{}">{}</a>'.format(url, dag_id))
def log_url_formatter(v, c, m, p):
return Markup(
'<a href="{m.log_url}">'
' <span class="glyphicon glyphicon-book" aria-hidden="true">'
'</span></a>').format(**locals())
def dag_run_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
url = url_for(
'airflow.graph',
dag_id=m.dag_id,
run_id=m.run_id,
execution_date=m.execution_date)
return Markup('<a href="{url}">{m.run_id}</a>'.format(**locals()))
def task_instance_link(v, c, m, p):
dag_id = bleach.clean(m.dag_id)
task_id = bleach.clean(m.task_id)
url = url_for(
'airflow.task',
dag_id=dag_id,
task_id=task_id,
execution_date=m.execution_date.isoformat())
url_root = url_for(
'airflow.graph',
dag_id=dag_id,
root=task_id,
execution_date=m.execution_date.isoformat())
return Markup(
"""
<span style="white-space: nowrap;">
<a href="{url}">{task_id}</a>
<a href="{url_root}" title="Filter on this task and upstream">
<span class="glyphicon glyphicon-filter" style="margin-left: 0px;"
aria-hidden="true"></span>
</a>
</span>
""".format(**locals()))
def state_token(state):
color = State.color(state)
return Markup(
'<span class="label" style="background-color:{color};">'
'{state}</span>'.format(**locals()))
def parse_datetime_f(value):
if not isinstance(value, dt.datetime):
return value
return timezone.make_aware(value)
def state_f(v, c, m, p):
return state_token(m.state)
def duration_f(v, c, m, p):
if m.end_date and m.duration:
return timedelta(seconds=m.duration)
def datetime_f(v, c, m, p):
attr = getattr(m, p)
dttm = attr.isoformat() if attr else ''
if timezone.utcnow().isoformat()[:4] == dttm[:4]:
dttm = dttm[5:]
return Markup("<nobr>{}</nobr>".format(dttm))
def nobr_f(v, c, m, p):
return Markup("<nobr>{}</nobr>".format(getattr(m, p)))
def label_link(v, c, m, p):
try:
default_params = ast.literal_eval(m.default_params)
except:
default_params = {}
url = url_for(
'airflow.chart', chart_id=m.id, iteration_no=m.iteration_no,
**default_params)
return Markup("<a href='{url}'>{m.label}</a>".format(**locals()))
def pool_link(v, c, m, p):
url = '/admin/taskinstance/?flt1_pool_equals=' + m.pool
return Markup("<a href='{url}'>{m.pool}</a>".format(**locals()))
def pygment_html_render(s, lexer=lexers.TextLexer):
return highlight(
s,
lexer(),
HtmlFormatter(linenos=True),
)
def render(obj, lexer):
out = ""
if isinstance(obj, basestring):
out += pygment_html_render(obj, lexer)
elif isinstance(obj, (tuple, list)):
for i, s in enumerate(obj):
out += "<div>List item #{}</div>".format(i)
out += "<div>" + pygment_html_render(s, lexer) + "</div>"
elif isinstance(obj, dict):
for k, v in obj.items():
out += '<div>Dict item "{}"</div>'.format(k)
out += "<div>" + pygment_html_render(v, lexer) + "</div>"
return out
def wrapped_markdown(s):
return '<div class="rich_doc">' + markdown.markdown(s) + "</div>"
attr_renderer = {
'bash_command': lambda x: render(x, lexers.BashLexer),
'hql': lambda x: render(x, lexers.SqlLexer),
'sql': lambda x: render(x, lexers.SqlLexer),
'doc': lambda x: render(x, lexers.TextLexer),
'doc_json': lambda x: render(x, lexers.JsonLexer),
'doc_rst': lambda x: render(x, lexers.RstLexer),
'doc_yaml': lambda x: render(x, lexers.YamlLexer),
'doc_md': wrapped_markdown,
'python_callable': lambda x: render(
inspect.getsource(x), lexers.PythonLexer),
}
def data_profiling_required(f):
"""Decorator for views requiring data profiling access"""
@wraps(f)
def decorated_function(*args, **kwargs):
if (
current_app.config['LOGIN_DISABLED'] or
(not current_user.is_anonymous() and current_user.data_profiling())
):
return f(*args, **kwargs)
else:
flash("This page requires data profiling privileges", "error")
return redirect(url_for('admin.index'))
return decorated_function
def fused_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=running')
return Markup("<a href='{0}'>{1}</a>".format(url, m.used_slots()))
def fqueued_slots(v, c, m, p):
url = (
'/admin/taskinstance/' +
'?flt1_pool_equals=' + m.pool +
'&flt2_state_equals=queued&sort=10&desc=1')
return Markup("<a href='{0}'>{1}</a>".format(url, m.queued_slots()))
def recurse_tasks(tasks, task_ids, dag_ids, task_id_to_dag):
if isinstance(tasks, list):
for task in tasks:
recurse_tasks(task, task_ids, dag_ids, task_id_to_dag)
return
if isinstance(tasks, SubDagOperator):
subtasks = tasks.subdag.tasks
dag_ids.append(tasks.subdag.dag_id)
for subtask in subtasks:
if subtask.task_id not in task_ids:
task_ids.append(subtask.task_id)
task_id_to_dag[subtask.task_id] = tasks.subdag
recurse_tasks(subtasks, task_ids, dag_ids, task_id_to_dag)
if isinstance(tasks, BaseOperator):
task_id_to_dag[tasks.task_id] = tasks.dag
def get_chart_height(dag):
"""
TODO(aoen): See [AIRFLOW-1263] We use the number of tasks in the DAG as a heuristic to
approximate the size of generated chart (otherwise the charts are tiny and unreadable
when DAGs have a large number of tasks). Ideally nvd3 should allow for dynamic-height
charts, that is charts that take up space based on the size of the components within.
"""
return 600 + len(dag.tasks) * 10
def get_date_time_num_runs_dag_runs_form_data(request, session, dag):
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
dttm = dag.latest_execution_date or timezone.utcnow()
base_date = request.args.get('base_date')
if base_date:
base_date = timezone.parse(base_date)
else:
        # The DateTimeField widget truncates milliseconds and would lose
        # the first dag run. Round up to the next second.
base_date = (dttm + timedelta(seconds=1)).replace(microsecond=0)
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
DR = models.DagRun
drs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(desc(DR.execution_date))
.limit(num_runs)
.all()
)
dr_choices = []
dr_state = None
for dr in drs:
dr_choices.append((dr.execution_date.isoformat(), dr.run_id))
if dttm == dr.execution_date:
dr_state = dr.state
# Happens if base_date was changed and the selected dag run is not in result
if not dr_state and drs:
dr = drs[0]
dttm = dr.execution_date
dr_state = dr.state
return {
'dttm': dttm,
'base_date': base_date,
'num_runs': num_runs,
'execution_date': dttm.isoformat(),
'dr_choices': dr_choices,
'dr_state': dr_state,
}
class Airflow(BaseView):
def is_visible(self):
return False
@expose('/')
@login_required
def index(self):
return self.render('airflow/dags.html')
@expose('/chart_data')
@data_profiling_required
@wwwutils.gzipped
# @cache.cached(timeout=3600, key_prefix=wwwutils.make_cache_key)
def chart_data(self):
from airflow import macros
import pandas as pd
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
csv = request.args.get('csv') == "true"
chart = session.query(models.Chart).filter_by(id=chart_id).first()
db = session.query(
models.Connection).filter_by(conn_id=chart.conn_id).first()
payload = {
"state": "ERROR",
"error": ""
}
# Processing templated fields
try:
args = ast.literal_eval(chart.default_params)
if type(args) is not type(dict()):
raise AirflowException('Not a dict')
except:
args = {}
payload['error'] += (
"Default params is not valid, string has to evaluate as "
"a Python dictionary. ")
request_dict = {k: request.args.get(k) for k in request.args}
args.update(request_dict)
args['macros'] = macros
sandbox = ImmutableSandboxedEnvironment()
sql = sandbox.from_string(chart.sql).render(**args)
label = sandbox.from_string(chart.label).render(**args)
payload['sql_html'] = Markup(highlight(
sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
payload['label'] = label
pd.set_option('display.max_colwidth', 100)
hook = db.get_hook()
try:
df = hook.get_pandas_df(
wwwutils.limit_sql(sql, CHART_LIMIT, conn_type=db.conn_type))
df = df.fillna(0)
except Exception as e:
payload['error'] += "SQL execution failed. Details: " + str(e)
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
if not payload['error'] and len(df) == CHART_LIMIT:
payload['warning'] = (
"Data has been truncated to {0}"
" rows. Expect incomplete results.").format(CHART_LIMIT)
if not payload['error'] and len(df) == 0:
payload['error'] += "Empty result set. "
elif (
not payload['error'] and
chart.sql_layout == 'series' and
chart.chart_type != "datatable" and
len(df.columns) < 3):
payload['error'] += "SQL needs to return at least 3 columns. "
elif (
not payload['error'] and
chart.sql_layout == 'columns' and
len(df.columns) < 2):
payload['error'] += "SQL needs to return at least 2 columns. "
elif not payload['error']:
import numpy as np
chart_type = chart.chart_type
data = None
if chart.show_datatable or chart_type == "datatable":
data = df.to_dict(orient="split")
data['columns'] = [{'title': c} for c in data['columns']]
payload['data'] = data
# Trying to convert time to something Highcharts likes
x_col = 1 if chart.sql_layout == 'series' else 0
if chart.x_is_date:
try:
# From string to datetime
df[df.columns[x_col]] = pd.to_datetime(
df[df.columns[x_col]])
df[df.columns[x_col]] = df[df.columns[x_col]].apply(
lambda x: int(x.strftime("%s")) * 1000)
except Exception as e:
payload['error'] = "Time conversion failed"
if chart_type == 'datatable':
payload['state'] = 'SUCCESS'
return wwwutils.json_response(payload)
else:
if chart.sql_layout == 'series':
# User provides columns (series, x, y)
xaxis_label = df.columns[1]
yaxis_label = df.columns[2]
df[df.columns[2]] = df[df.columns[2]].astype(np.float)
df = df.pivot_table(
index=df.columns[1],
columns=df.columns[0],
values=df.columns[2], aggfunc=np.sum)
else:
# User provides columns (x, y, metric1, metric2, ...)
xaxis_label = df.columns[0]
yaxis_label = 'y'
df.index = df[df.columns[0]]
df = df.sort(df.columns[0])
del df[df.columns[0]]
for col in df.columns:
df[col] = df[col].astype(np.float)
df = df.fillna(0)
NVd3ChartClass = chart_mapping.get(chart.chart_type)
NVd3ChartClass = getattr(nvd3, NVd3ChartClass)
nvd3_chart = NVd3ChartClass(x_is_date=chart.x_is_date)
for col in df.columns:
nvd3_chart.add_serie(name=col, y=df[col].tolist(), x=df[col].index.tolist())
try:
nvd3_chart.buildcontent()
payload['chart_type'] = nvd3_chart.__class__.__name__
payload['htmlcontent'] = nvd3_chart.htmlcontent
except Exception as e:
payload['error'] = str(e)
payload['state'] = 'SUCCESS'
payload['request_dict'] = request_dict
return wwwutils.json_response(payload)
@expose('/chart')
@data_profiling_required
def chart(self):
if conf.getboolean('core', 'secure_mode'):
abort(404)
with create_session() as session:
chart_id = request.args.get('chart_id')
embed = request.args.get('embed')
chart = session.query(models.Chart).filter_by(id=chart_id).first()
NVd3ChartClass = chart_mapping.get(chart.chart_type)
if not NVd3ChartClass:
flash(
"Not supported anymore as the license was incompatible, "
"sorry",
"danger")
redirect('/admin/chart/')
sql = ""
if chart.show_sql:
sql = Markup(highlight(
chart.sql,
lexers.SqlLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/nvd3.html',
chart=chart,
title="Airflow - Chart",
sql=sql,
label=chart.label,
embed=embed)
@expose('/dag_stats')
@login_required
@provide_session
def dag_stats(self, session=None):
ds = models.DagStat
ds.update(
dag_ids=[dag.dag_id for dag in dagbag.dags.values() if not dag.is_subdag]
)
qry = (
session.query(ds.dag_id, ds.state, ds.count)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.dag_states:
try:
count = data[dag.dag_id][state]
except Exception:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/task_stats')
@login_required
@provide_session
def task_stats(self, session=None):
TI = models.TaskInstance
DagRun = models.DagRun
Dag = models.DagModel
LastDagRun = (
session.query(DagRun.dag_id, sqla.func.max(DagRun.execution_date).label('execution_date'))
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state != State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.group_by(DagRun.dag_id)
.subquery('last_dag_run')
)
RunningDagRun = (
session.query(DagRun.dag_id, DagRun.execution_date)
.join(Dag, Dag.dag_id == DagRun.dag_id)
.filter(DagRun.state == State.RUNNING)
.filter(Dag.is_active == True)
.filter(Dag.is_subdag == False)
.subquery('running_dag_run')
)
# Select all task_instances from active dag_runs.
# If no dag_run is active, return task instances from most recent dag_run.
LastTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(LastDagRun, and_(
LastDagRun.c.dag_id == TI.dag_id,
LastDagRun.c.execution_date == TI.execution_date))
)
RunningTI = (
session.query(TI.dag_id.label('dag_id'), TI.state.label('state'))
.join(RunningDagRun, and_(
RunningDagRun.c.dag_id == TI.dag_id,
RunningDagRun.c.execution_date == TI.execution_date))
)
UnionTI = union_all(LastTI, RunningTI).alias('union_ti')
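        # count task instances per dag_id and state over the combined (last + running) dag runs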
qry = (
session.query(UnionTI.c.dag_id, UnionTI.c.state, sqla.func.count())
.group_by(UnionTI.c.dag_id, UnionTI.c.state)
)
data = {}
for dag_id, state, count in qry:
if dag_id not in data:
data[dag_id] = {}
data[dag_id][state] = count
session.commit()
payload = {}
for dag in dagbag.dags.values():
payload[dag.safe_dag_id] = []
for state in State.task_states:
try:
count = data[dag.dag_id][state]
except:
count = 0
d = {
'state': state,
'count': count,
'dag_id': dag.dag_id,
'color': State.color(state)
}
payload[dag.safe_dag_id].append(d)
return wwwutils.json_response(payload)
@expose('/code')
@login_required
def code(self):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = dag_id
try:
with open(dag.fileloc, 'r') as f:
code = f.read()
html_code = highlight(
code, lexers.PythonLexer(), HtmlFormatter(linenos=True))
except IOError as e:
html_code = str(e)
return self.render(
'airflow/dag_code.html', html_code=html_code, dag=dag, title=title,
root=request.args.get('root'),
demo_mode=conf.getboolean('webserver', 'demo_mode'))
@expose('/dag_details')
@login_required
@provide_session
def dag_details(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
title = "DAG details"
TI = models.TaskInstance
states = (
session.query(TI.state, sqla.func.count(TI.dag_id))
.filter(TI.dag_id == dag_id)
.group_by(TI.state)
.all()
)
return self.render(
'airflow/dag_details.html',
dag=dag, title=title, states=states, State=State)
@current_app.errorhandler(404)
def circles(self):
return render_template(
'airflow/circles.html', hostname=get_hostname()), 404
@current_app.errorhandler(500)
def show_traceback(self):
from airflow.utils import asciiart as ascii_
return render_template(
'airflow/traceback.html',
hostname=get_hostname(),
nukular=ascii_.nukular,
info=traceback.format_exc()), 500
@expose('/noaccess')
def noaccess(self):
return self.render('airflow/noaccess.html')
@expose('/pickle_info')
@login_required
def pickle_info(self):
d = {}
dag_id = request.args.get('dag_id')
dags = [dagbag.dags.get(dag_id)] if dag_id else dagbag.dags.values()
for dag in dags:
if not dag.is_subdag:
d[dag.dag_id] = dag.pickle_info()
return wwwutils.json_response(d)
@expose('/login', methods=['GET', 'POST'])
def login(self):
return airflow.login.login(self, request)
@expose('/logout')
def logout(self):
logout_user()
flash('You have been logged out.')
return redirect(url_for('admin.index'))
@expose('/rendered')
@login_required
@wwwutils.action_logging
def rendered(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
task = copy.copy(dag.get_task(task_id))
ti = models.TaskInstance(task=task, execution_date=dttm)
try:
ti.render_templates()
except Exception as e:
flash("Error rendering template: " + str(e), "error")
title = "Rendered Template"
html_dict = {}
for template_field in task.__class__.template_fields:
content = getattr(task, template_field)
if template_field in attr_renderer:
html_dict[template_field] = attr_renderer[template_field](content)
else:
html_dict[template_field] = (
"<pre><code>" + str(content) + "</pre></code>")
return self.render(
'airflow/ti_code.html',
html_dict=html_dict,
dag=dag,
task_id=task_id,
execution_date=execution_date,
form=form,
title=title, )
@expose('/get_logs_with_metadata')
@login_required
@wwwutils.action_logging
@provide_session
def get_logs_with_metadata(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
try_number = int(request.args.get('try_number'))
metadata = request.args.get('metadata')
metadata = json.loads(metadata)
# metadata may be null
if not metadata:
metadata = {}
# Convert string datetime into actual datetime
try:
execution_date = timezone.parse(execution_date)
except ValueError:
error_message = (
'Given execution date, {}, could not be identified '
'as a date. Example date format: 2015-11-16T14:34:15+00:00'.format(
execution_date))
response = jsonify({'error': error_message})
response.status_code = 400
return response
logger = logging.getLogger('airflow.task')
task_log_reader = conf.get('core', 'task_log_reader')
handler = next((handler for handler in logger.handlers
if handler.name == task_log_reader), None)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
try:
if ti is None:
logs = ["*** Task instance did not exist in the DB\n"]
metadata['end_of_log'] = True
else:
dag = dagbag.get_dag(dag_id)
ti.task = dag.get_task(ti.task_id)
logs, metadatas = handler.read(ti, try_number, metadata=metadata)
metadata = metadatas[0]
for i, log in enumerate(logs):
if PY2 and not isinstance(log, unicode):
logs[i] = log.decode('utf-8')
message = logs[0]
return jsonify(message=message, metadata=metadata)
except AttributeError as e:
error_message = ["Task log handler {} does not support read logs.\n{}\n"
.format(task_log_reader, str(e))]
metadata['end_of_log'] = True
return jsonify(message=error_message, error=True, metadata=metadata)
@expose('/log')
@login_required
@wwwutils.action_logging
@provide_session
def log(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
ti = session.query(models.TaskInstance).filter(
models.TaskInstance.dag_id == dag_id,
models.TaskInstance.task_id == task_id,
models.TaskInstance.execution_date == dttm).first()
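        # one placeholder per past attempt; the actual log text is fetched asynchronously via get_logs_with_metadata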
logs = [''] * (ti.next_try_number - 1 if ti is not None else 0)
return self.render(
'airflow/ti_log.html',
logs=logs, dag=dag, title="Log by attempts",
dag_id=dag.dag_id, task_id=task_id,
execution_date=execution_date, form=form)
@expose('/task')
@login_required
@wwwutils.action_logging
def task(self):
TI = models.TaskInstance
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
task = copy.copy(dag.get_task(task_id))
task.resolve_template_files()
ti = TI(task=task, execution_date=dttm)
ti.refresh_from_db()
ti_attrs = []
for attr_name in dir(ti):
if not attr_name.startswith('_'):
attr = getattr(ti, attr_name)
if type(attr) != type(self.task):
ti_attrs.append((attr_name, str(attr)))
task_attrs = []
for attr_name in dir(task):
if not attr_name.startswith('_'):
attr = getattr(task, attr_name)
if type(attr) != type(self.task) and \
attr_name not in attr_renderer:
task_attrs.append((attr_name, str(attr)))
# Color coding the special attributes that are code
special_attrs_rendered = {}
for attr_name in attr_renderer:
if hasattr(task, attr_name):
source = getattr(task, attr_name)
special_attrs_rendered[attr_name] = attr_renderer[attr_name](source)
no_failed_deps_result = [(
"Unknown",
dedent("""\
All dependencies are met but the task instance is not running.
In most cases this just means that the task will probably
be scheduled soon unless:<br/>
- The scheduler is down or under heavy load<br/>
- The following configuration values may be limiting the number
of queueable processes:
<code>parallelism</code>,
<code>dag_concurrency</code>,
<code>max_active_dag_runs_per_dag</code>,
<code>non_pooled_task_slot_count</code><br/>
{}
<br/>
If this task instance does not start soon please contact your Airflow """
"""administrator for assistance."""
.format(
"- This task instance already ran and had its state changed "
"manually (e.g. cleared in the UI)<br/>"
if ti.state == State.NONE else "")))]
# Use the scheduler's context to figure out which dependencies are not met
dep_context = DepContext(SCHEDULER_DEPS)
failed_dep_reasons = [(dep.dep_name, dep.reason) for dep in
ti.get_failed_dep_statuses(
dep_context=dep_context)]
title = "Task Instance Details"
return self.render(
'airflow/task.html',
task_attrs=task_attrs,
ti_attrs=ti_attrs,
failed_dep_reasons=failed_dep_reasons or no_failed_deps_result,
task_id=task_id,
execution_date=execution_date,
special_attrs_rendered=special_attrs_rendered,
form=form,
dag=dag, title=title)
@expose('/xcom')
@login_required
@wwwutils.action_logging
@provide_session
def xcom(self, session=None):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
# Carrying execution_date through, even though it's irrelevant for
# this context
execution_date = request.args.get('execution_date')
dttm = pendulum.parse(execution_date)
form = DateTimeForm(data={'execution_date': dttm})
dag = dagbag.get_dag(dag_id)
if not dag or task_id not in dag.task_ids:
flash(
"Task [{}.{}] doesn't seem to exist"
" at the moment".format(dag_id, task_id),
"error")
return redirect('/admin/')
xcomlist = session.query(XCom).filter(
XCom.dag_id == dag_id, XCom.task_id == task_id,
XCom.execution_date == dttm).all()
attributes = []
for xcom in xcomlist:
if not xcom.key.startswith('_'):
attributes.append((xcom.key, xcom.value))
title = "XCom"
return self.render(
'airflow/xcom.html',
attributes=attributes,
task_id=task_id,
execution_date=execution_date,
form=form,
dag=dag, title=title)
@expose('/run')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def run(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
ignore_all_deps = request.args.get('ignore_all_deps') == "true"
ignore_task_deps = request.args.get('ignore_task_deps') == "true"
ignore_ti_state = request.args.get('ignore_ti_state') == "true"
try:
from airflow.executors import GetDefaultExecutor
from airflow.executors.celery_executor import CeleryExecutor
executor = GetDefaultExecutor()
if not isinstance(executor, CeleryExecutor):
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
except ImportError:
# in case CeleryExecutor cannot be imported it is not active either
flash("Only works with the CeleryExecutor, sorry", "error")
return redirect(origin)
ti = models.TaskInstance(task=task, execution_date=execution_date)
ti.refresh_from_db()
# Make sure the task instance can be queued
dep_context = DepContext(
deps=QUEUE_DEPS,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
failed_deps = list(ti.get_failed_dep_statuses(dep_context=dep_context))
if failed_deps:
failed_deps_str = ", ".join(
["{}: {}".format(dep.dep_name, dep.reason) for dep in failed_deps])
flash("Could not queue task instance for execution, dependencies not met: "
"{}".format(failed_deps_str),
"error")
return redirect(origin)
executor.start()
executor.queue_task_instance(
ti,
ignore_all_deps=ignore_all_deps,
ignore_task_deps=ignore_task_deps,
ignore_ti_state=ignore_ti_state)
executor.heartbeat()
flash(
"Sent {} to the message queue, "
"it should start any moment now.".format(ti))
return redirect(origin)
@expose('/delete')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def delete(self):
from airflow.api.common.experimental import delete_dag
from airflow.exceptions import DagNotFound, DagFileExists
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
try:
delete_dag.delete_dag(dag_id)
except DagNotFound:
flash("DAG with id {} not found. Cannot delete".format(dag_id))
return redirect(request.referrer)
except DagFileExists:
flash("Dag id {} is still in DagBag. "
"Remove the DAG file first.".format(dag_id))
return redirect(request.referrer)
flash("Deleting DAG with id {}. May take a couple minutes to fully"
" disappear.".format(dag_id))
# Upon successful delete return to origin
return redirect(origin)
@expose('/trigger')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def trigger(self):
dag_id = request.args.get('dag_id')
origin = request.args.get('origin') or "/admin/"
dag = dagbag.get_dag(dag_id)
if not dag:
flash("Cannot find dag {}".format(dag_id))
return redirect(origin)
execution_date = timezone.utcnow()
run_id = "manual__{0}".format(execution_date.isoformat())
dr = DagRun.find(dag_id=dag_id, run_id=run_id)
if dr:
flash("This run_id {} already exists".format(run_id))
return redirect(origin)
run_conf = {}
dag.create_dagrun(
run_id=run_id,
execution_date=execution_date,
state=State.RUNNING,
conf=run_conf,
external_trigger=True
)
flash(
"Triggered {}, "
"it should start any moment now.".format(dag_id))
return redirect(origin)
def _clear_dag_tis(self, dag, start_date, end_date, origin,
recursive=False, confirmed=False):
if confirmed:
count = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive)
flash("{0} task instances have been cleared".format(count))
return redirect(origin)
tis = dag.clear(
start_date=start_date,
end_date=end_date,
include_subdags=recursive,
dry_run=True)
if not tis:
flash("No task instances to clear", 'error')
response = redirect(origin)
else:
details = "\n".join([str(t) for t in tis])
response = self.render(
'airflow/confirm.html',
message=("Here's the list of task instances you are about "
"to clear:"),
details=details)
return response
@expose('/clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
dag = dagbag.get_dag(dag_id)
execution_date = request.args.get('execution_date')
execution_date = pendulum.parse(execution_date)
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
recursive = request.args.get('recursive') == "true"
dag = dag.sub_dag(
task_regex=r"^{0}$".format(task_id),
include_downstream=downstream,
include_upstream=upstream)
end_date = execution_date if not future else None
start_date = execution_date if not past else None
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=recursive, confirmed=confirmed)
@expose('/dagrun_clear')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_clear(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
dag = dagbag.get_dag(dag_id)
execution_date = pendulum.parse(execution_date)
start_date = execution_date
end_date = execution_date
return self._clear_dag_tis(dag, start_date, end_date, origin,
recursive=True, confirmed=confirmed)
@expose('/blocked')
@login_required
@provide_session
def blocked(self, session=None):
DR = models.DagRun
dags = (
session.query(DR.dag_id, sqla.func.count(DR.id))
.filter(DR.state == State.RUNNING)
.group_by(DR.dag_id)
.all()
)
payload = []
for dag_id, active_dag_runs in dags:
max_active_runs = 0
if dag_id in dagbag.dags:
max_active_runs = dagbag.dags[dag_id].max_active_runs
payload.append({
'dag_id': dag_id,
'active_dag_run': active_dag_runs,
'max_active_runs': max_active_runs,
})
return wwwutils.json_response(payload)
def _mark_dagrun_state_as_failed(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_failed(dag, execution_date, commit=confirmed)
if confirmed:
flash('Marked failed on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as failed"),
details=details)
return response
def _mark_dagrun_state_as_success(self, dag_id, execution_date, confirmed, origin):
if not execution_date:
flash('Invalid execution date', 'error')
return redirect(origin)
execution_date = pendulum.parse(execution_date)
dag = dagbag.get_dag(dag_id)
if not dag:
flash('Cannot find DAG: {}'.format(dag_id), 'error')
return redirect(origin)
new_dag_state = set_dag_run_state_to_success(dag, execution_date,
commit=confirmed)
if confirmed:
flash('Marked success on {} task instances'.format(len(new_dag_state)))
return redirect(origin)
else:
details = '\n'.join([str(t) for t in new_dag_state])
response = self.render('airflow/confirm.html',
message=("Here's the list of task instances you are "
"about to mark as success"),
details=details)
return response
@expose('/dagrun_failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_failed(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_failed(dag_id, execution_date,
confirmed, origin)
@expose('/dagrun_success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def dagrun_success(self):
dag_id = request.args.get('dag_id')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == 'true'
origin = request.args.get('origin')
return self._mark_dagrun_state_as_success(dag_id, execution_date,
confirmed, origin)
def _mark_task_instance_state(self, dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, state):
dag = dagbag.get_dag(dag_id)
task = dag.get_task(task_id)
task.dag = dag
execution_date = pendulum.parse(execution_date)
if not dag:
flash("Cannot find DAG: {}".format(dag_id))
return redirect(origin)
if not task:
flash("Cannot find task {} in DAG {}".format(task_id, dag.dag_id))
return redirect(origin)
from airflow.api.common.experimental.mark_tasks import set_state
if confirmed:
altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=True)
flash("Marked {} on {} task instances".format(state, len(altered)))
return redirect(origin)
to_be_altered = set_state(task=task, execution_date=execution_date,
upstream=upstream, downstream=downstream,
future=future, past=past, state=state,
commit=False)
details = "\n".join([str(t) for t in to_be_altered])
response = self.render("airflow/confirm.html",
message=("Here's the list of task instances you are "
"about to mark as {}:".format(state)),
details=details)
return response
@expose('/failed')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def failed(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.FAILED)
@expose('/success')
@login_required
@wwwutils.action_logging
@wwwutils.notify_owner
def success(self):
dag_id = request.args.get('dag_id')
task_id = request.args.get('task_id')
origin = request.args.get('origin')
execution_date = request.args.get('execution_date')
confirmed = request.args.get('confirmed') == "true"
upstream = request.args.get('upstream') == "true"
downstream = request.args.get('downstream') == "true"
future = request.args.get('future') == "true"
past = request.args.get('past') == "true"
return self._mark_task_instance_state(dag_id, task_id, origin, execution_date,
confirmed, upstream, downstream,
future, past, State.SUCCESS)
@expose('/tree')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def tree(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_downstream=False,
include_upstream=True)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = timezone.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
DR = models.DagRun
dag_runs = (
session.query(DR)
.filter(
DR.dag_id == dag.dag_id,
DR.execution_date <= base_date)
.order_by(DR.execution_date.desc())
.limit(num_runs)
.all()
)
dag_runs = {
dr.execution_date: alchemy_to_dict(dr) for dr in dag_runs}
dates = sorted(list(dag_runs.keys()))
max_date = max(dates) if dates else None
min_date = min(dates) if dates else None
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
task_instances = {}
for ti in tis:
tid = alchemy_to_dict(ti)
dr = dag_runs.get(ti.execution_date)
tid['external_trigger'] = dr['external_trigger'] if dr else False
task_instances[(ti.task_id, ti.execution_date)] = tid
expanded = []
# The default recursion traces every path so that tree view has full
# expand/collapse functionality. After 5,000 nodes we stop and fall
# back on a quick DFS search for performance. See PR #320.
node_count = [0]
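        # cap the number of fully expanded nodes, scaled down when the DAG has several roots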
node_limit = 5000 / max(1, len(dag.roots))
def recurse_nodes(task, visited):
visited.add(task)
node_count[0] += 1
children = [
recurse_nodes(t, visited) for t in task.upstream_list
if node_count[0] < node_limit or t not in visited]
# D3 tree uses children vs _children to define what is
# expanded or not. The following block makes it such that
# repeated nodes are collapsed by default.
children_key = 'children'
if task.task_id not in expanded:
expanded.append(task.task_id)
elif children:
children_key = "_children"
def set_duration(tid):
if (isinstance(tid, dict) and tid.get("state") == State.RUNNING and
tid["start_date"] is not None):
d = timezone.utcnow() - pendulum.parse(tid["start_date"])
tid["duration"] = d.total_seconds()
return tid
return {
'name': task.task_id,
'instances': [
set_duration(task_instances.get((task.task_id, d))) or {
'execution_date': d.isoformat(),
'task_id': task.task_id
}
for d in dates],
children_key: children,
'num_dep': len(task.upstream_list),
'operator': task.task_type,
'retries': task.retries,
'owner': task.owner,
'start_date': task.start_date,
'end_date': task.end_date,
'depends_on_past': task.depends_on_past,
'ui_color': task.ui_color,
}
data = {
'name': '[DAG]',
'children': [recurse_nodes(t, set()) for t in dag.roots],
'instances': [
dag_runs.get(d) or {'execution_date': d.isoformat()}
for d in dates],
}
data = json.dumps(data, indent=4, default=json_ser)
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
return self.render(
'airflow/tree.html',
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
root=root,
form=form,
dag=dag, data=data, blur=blur, num_runs=num_runs)
@expose('/graph')
@login_required
@wwwutils.gzipped
@wwwutils.action_logging
@provide_session
def graph(self, session=None):
dag_id = request.args.get('dag_id')
blur = conf.getboolean('webserver', 'demo_mode')
dag = dagbag.get_dag(dag_id)
if dag_id not in dagbag.dags:
flash('DAG "{0}" seems to be missing.'.format(dag_id), "error")
return redirect('/admin/')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
arrange = request.args.get('arrange', dag.orientation)
nodes = []
edges = []
for task in dag.tasks:
nodes.append({
'id': task.task_id,
'value': {
'label': task.task_id,
'labelStyle': "fill:{0};".format(task.ui_fgcolor),
'style': "fill:{0};".format(task.ui_color),
}
})
def get_upstream(task):
for t in task.upstream_list:
edge = {
'u': t.task_id,
'v': task.task_id,
}
if edge not in edges:
edges.append(edge)
get_upstream(t)
for t in dag.roots:
get_upstream(t)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dt_nr_dr_data['arrange'] = arrange
dttm = dt_nr_dr_data['dttm']
class GraphForm(DateTimeWithNumRunsWithDagRunsForm):
arrange = SelectField("Layout", choices=(
('LR', "Left->Right"),
('RL', "Right->Left"),
('TB', "Top->Bottom"),
('BT', "Bottom->Top"),
))
form = GraphForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
tasks = {
t.task_id: {
'dag_id': t.dag_id,
'task_type': t.task_type,
}
for t in dag.tasks}
if not tasks:
flash("No tasks found", "error")
session.commit()
doc_md = markdown.markdown(dag.doc_md) if hasattr(dag, 'doc_md') and dag.doc_md else ''
return self.render(
'airflow/graph.html',
dag=dag,
form=form,
width=request.args.get('width', "100%"),
height=request.args.get('height', "800"),
execution_date=dttm.isoformat(),
state_token=state_token(dt_nr_dr_data['dr_state']),
doc_md=doc_md,
arrange=arrange,
operators=sorted(
list(set([op.__class__ for op in dag.tasks])),
key=lambda x: x.__name__
),
blur=blur,
root=root or '',
task_instances=json.dumps(task_instances, indent=2),
tasks=json.dumps(tasks, indent=2),
nodes=json.dumps(nodes, indent=2),
edges=json.dumps(edges, indent=2), )
@expose('/duration')
@login_required
@wwwutils.action_logging
@provide_session
def duration(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
cum_chart = nvd3.lineChart(
name="cumLineChart", x_is_date=True, height=chart_height, width="1200")
y = defaultdict(list)
x = defaultdict(list)
cum_y = defaultdict(list)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
TF = models.TaskFail
ti_fails = (
session
.query(TF)
.filter(
TF.dag_id == dag.dag_id,
TF.execution_date >= min_date,
TF.execution_date <= base_date,
TF.task_id.in_([t.task_id for t in dag.tasks]))
.all()
)
fails_totals = defaultdict(int)
for tf in ti_fails:
dict_key = (tf.dag_id, tf.task_id, tf.execution_date)
fails_totals[dict_key] += tf.duration
for ti in tis:
if ti.duration:
dttm = wwwutils.epoch(ti.execution_date)
x[ti.task_id].append(dttm)
y[ti.task_id].append(float(ti.duration))
fails_dict_key = (ti.dag_id, ti.task_id, ti.execution_date)
fails_total = fails_totals[fails_dict_key]
cum_y[ti.task_id].append(float(ti.duration + fails_total))
# determine the most relevant time unit for the set of task instance
# durations for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
cum_y_unit = infer_time_unit([d for t in cum_y.values() for d in t])
# update the y Axis on both charts to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
cum_chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Duration ({})'.format(cum_y_unit))
cum_chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
cum_chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(cum_y[task.task_id],
cum_y_unit))
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
cum_chart.buildcontent()
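        # nvd3 closes its generated script with '});'; splice a 'chartload'
        # trigger in just before that so the page can react once the
        # cumulative chart has finished rendering.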
s_index = cum_chart.htmlcontent.rfind('});')
cum_chart.htmlcontent = (cum_chart.htmlcontent[:s_index] +
"$(function() {$( document ).trigger('chartload') })" +
cum_chart.htmlcontent[s_index:])
return self.render(
'airflow/duration_chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent,
cum_chart=cum_chart.htmlcontent
)
@expose('/tries')
@login_required
@wwwutils.action_logging
@provide_session
def tries(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, y_axis_format='d', height=chart_height,
width="1200")
for task in dag.tasks:
y = []
x = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
dttm = wwwutils.epoch(ti.execution_date)
x.append(dttm)
y.append(ti.try_number)
if x:
chart.add_serie(name=task.task_id, x=x, y=y)
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
tries = sorted(list({ti.try_number for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if tries else None
session.commit()
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
chart=chart.htmlcontent
)
@expose('/landing_times')
@login_required
@wwwutils.action_logging
@provide_session
def landing_times(self, session=None):
default_dag_run = conf.getint('webserver', 'default_dag_run_display_number')
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
base_date = request.args.get('base_date')
num_runs = request.args.get('num_runs')
num_runs = int(num_runs) if num_runs else default_dag_run
if base_date:
base_date = pendulum.parse(base_date)
else:
base_date = dag.latest_execution_date or timezone.utcnow()
dates = dag.date_range(base_date, num=-abs(num_runs))
min_date = dates[0] if dates else datetime(2000, 1, 1)
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
chart_height = get_chart_height(dag)
chart = nvd3.lineChart(
name="lineChart", x_is_date=True, height=chart_height, width="1200")
y = {}
x = {}
for task in dag.tasks:
y[task.task_id] = []
x[task.task_id] = []
for ti in task.get_task_instances(session, start_date=min_date,
end_date=base_date):
if ti.end_date:
ts = ti.execution_date
following_schedule = dag.following_schedule(ts)
if dag.schedule_interval and following_schedule:
ts = following_schedule
dttm = wwwutils.epoch(ti.execution_date)
secs = (ti.end_date - ts).total_seconds()
x[ti.task_id].append(dttm)
y[ti.task_id].append(secs)
# determine the most relevant time unit for the set of landing times
# for the DAG
y_unit = infer_time_unit([d for t in y.values() for d in t])
# update the y Axis to have the correct time units
chart.create_y_axis('yAxis', format='.02f', custom_format=False,
label='Landing Time ({})'.format(y_unit))
chart.axislist['yAxis']['axisLabelDistance'] = '40'
for task in dag.tasks:
if x[task.task_id]:
chart.add_serie(name=task.task_id, x=x[task.task_id],
y=scale_time_units(y[task.task_id], y_unit))
tis = dag.get_task_instances(
session, start_date=min_date, end_date=base_date)
dates = sorted(list({ti.execution_date for ti in tis}))
max_date = max([ti.execution_date for ti in tis]) if dates else None
form = DateTimeWithNumRunsForm(data={'base_date': max_date,
'num_runs': num_runs})
chart.buildcontent()
return self.render(
'airflow/chart.html',
dag=dag,
chart=chart.htmlcontent,
height=str(chart_height + 100) + "px",
demo_mode=conf.getboolean('webserver', 'demo_mode'),
root=root,
form=form,
)
@expose('/paused', methods=['POST'])
@login_required
@wwwutils.action_logging
@provide_session
def paused(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
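        # The 'is_paused' query arg reflects the DAG's current (pre-toggle)
        # state, hence the apparent inversion below.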
if request.args.get('is_paused') == 'false':
orm_dag.is_paused = True
else:
orm_dag.is_paused = False
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
return "OK"
@expose('/refresh')
@login_required
@wwwutils.action_logging
@provide_session
def refresh(self, session=None):
DagModel = models.DagModel
dag_id = request.args.get('dag_id')
orm_dag = session.query(
DagModel).filter(DagModel.dag_id == dag_id).first()
if orm_dag:
orm_dag.last_expired = timezone.utcnow()
session.merge(orm_dag)
session.commit()
dagbag.get_dag(dag_id)
flash("DAG [{}] is now fresh as a daisy".format(dag_id))
return redirect(request.referrer)
@expose('/refresh_all')
@login_required
@wwwutils.action_logging
def refresh_all(self):
dagbag.collect_dags(only_if_updated=False)
flash("All DAGs are now up to date")
return redirect('/')
@expose('/gantt')
@login_required
@wwwutils.action_logging
@provide_session
def gantt(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
demo_mode = conf.getboolean('webserver', 'demo_mode')
root = request.args.get('root')
if root:
dag = dag.sub_dag(
task_regex=root,
include_upstream=True,
include_downstream=False)
dt_nr_dr_data = get_date_time_num_runs_dag_runs_form_data(request, session, dag)
dttm = dt_nr_dr_data['dttm']
form = DateTimeWithNumRunsWithDagRunsForm(data=dt_nr_dr_data)
form.execution_date.choices = dt_nr_dr_data['dr_choices']
tis = [
ti for ti in dag.get_task_instances(session, dttm, dttm)
if ti.start_date]
tis = sorted(tis, key=lambda ti: ti.start_date)
TF = models.TaskFail
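        # Pull TaskFail records for every plotted task instance so failed
        # attempts show up as their own bars next to the final try.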
ti_fails = list(itertools.chain(*[(
session
.query(TF)
.filter(TF.dag_id == ti.dag_id,
TF.task_id == ti.task_id,
TF.execution_date == ti.execution_date)
.all()
) for ti in tis]))
tis_with_fails = sorted(tis + ti_fails, key=lambda ti: ti.start_date)
tasks = []
for ti in tis_with_fails:
end_date = ti.end_date if ti.end_date else timezone.utcnow()
            state = ti.state if isinstance(ti, models.TaskInstance) else State.FAILED
tasks.append({
'startDate': wwwutils.epoch(ti.start_date),
'endDate': wwwutils.epoch(end_date),
'isoStart': ti.start_date.isoformat()[:-4],
'isoEnd': end_date.isoformat()[:-4],
'taskName': ti.task_id,
'duration': "{}".format(end_date - ti.start_date)[:-4],
'status': state,
'executionDate': ti.execution_date.isoformat(),
})
states = {task['status']: task['status'] for task in tasks}
data = {
'taskNames': [ti.task_id for ti in tis],
'tasks': tasks,
'taskStatus': states,
'height': len(tis) * 25 + 25,
}
session.commit()
return self.render(
'airflow/gantt.html',
dag=dag,
execution_date=dttm.isoformat(),
form=form,
data=json.dumps(data, indent=2),
base_date='',
demo_mode=demo_mode,
root=root,
)
@expose('/object/task_instances')
@login_required
@wwwutils.action_logging
@provide_session
def task_instances(self, session=None):
dag_id = request.args.get('dag_id')
dag = dagbag.get_dag(dag_id)
dttm = request.args.get('execution_date')
if dttm:
dttm = pendulum.parse(dttm)
else:
return ("Error: Invalid execution_date")
task_instances = {
ti.task_id: alchemy_to_dict(ti)
for ti in dag.get_task_instances(session, dttm, dttm)}
return json.dumps(task_instances)
@expose('/variables/<form>', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def variables(self, form):
try:
if request.method == 'POST':
data = request.json
if data:
with create_session() as session:
var = models.Variable(key=form, val=json.dumps(data))
session.add(var)
session.commit()
return ""
else:
return self.render(
'airflow/variables/{}.html'.format(form)
)
        except Exception:
# prevent XSS
form = escape(form)
return ("Error: form airflow/variables/{}.html "
"not found.").format(form), 404
@expose('/varimport', methods=["GET", "POST"])
@login_required
@wwwutils.action_logging
def varimport(self):
try:
d = json.load(UTF8_READER(request.files['file']))
except Exception as e:
flash("Missing file or syntax error: {}.".format(e))
else:
for k, v in d.items():
models.Variable.set(k, v, serialize_json=isinstance(v, dict))
flash("{} variable(s) successfully updated.".format(len(d)))
return redirect('/admin/variable')
class HomeView(AdminIndexView):
@expose("/")
@login_required
@provide_session
def index(self, session=None):
DM = models.DagModel
# restrict the dags shown if filter_by_owner and current user is not superuser
do_filter = FILTER_BY_OWNER and (not current_user.is_superuser())
owner_mode = conf.get('webserver', 'OWNER_MODE').strip().lower()
hide_paused_dags_by_default = conf.getboolean('webserver',
'hide_paused_dags_by_default')
show_paused_arg = request.args.get('showPaused', 'None')
def get_int_arg(value, default=0):
try:
return int(value)
except ValueError:
return default
arg_current_page = request.args.get('page', '0')
arg_search_query = request.args.get('search', None)
dags_per_page = PAGE_SIZE
current_page = get_int_arg(arg_current_page, default=0)
if show_paused_arg.strip().lower() == 'false':
hide_paused = True
elif show_paused_arg.strip().lower() == 'true':
hide_paused = False
else:
hide_paused = hide_paused_dags_by_default
# read orm_dags from the db
sql_query = session.query(DM)
if do_filter and owner_mode == 'ldapgroup':
sql_query = sql_query.filter(
~DM.is_subdag,
DM.is_active,
DM.owners.in_(current_user.ldap_groups)
)
elif do_filter and owner_mode == 'user':
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active,
DM.owners == current_user.user.username
)
else:
sql_query = sql_query.filter(
~DM.is_subdag, DM.is_active
)
# optionally filter out "paused" dags
if hide_paused:
sql_query = sql_query.filter(~DM.is_paused)
orm_dags = {dag.dag_id: dag for dag
in sql_query
.all()}
import_errors = session.query(models.ImportError).all()
for ie in import_errors:
flash(
"Broken DAG: [{ie.filename}] {ie.stacktrace}".format(ie=ie),
"error")
# get a list of all non-subdag dags visible to everyone
# optionally filter out "paused" dags
if hide_paused:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag and not dag.is_paused]
else:
unfiltered_webserver_dags = [dag for dag in dagbag.dags.values() if
not dag.parent_dag]
# optionally filter to get only dags that the user should see
if do_filter and owner_mode == 'ldapgroup':
# only show dags owned by someone in @current_user.ldap_groups
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner in current_user.ldap_groups
}
elif do_filter and owner_mode == 'user':
# only show dags owned by @current_user.user.username
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
if dag.owner == current_user.user.username
}
else:
webserver_dags = {
dag.dag_id: dag
for dag in unfiltered_webserver_dags
}
if arg_search_query:
lower_search_query = arg_search_query.lower()
# filter by dag_id
webserver_dags_filtered = {
dag_id: dag
for dag_id, dag in webserver_dags.items()
if (lower_search_query in dag_id.lower() or
lower_search_query in dag.owner.lower())
}
all_dag_ids = (set([dag.dag_id for dag in orm_dags.values()
if lower_search_query in dag.dag_id.lower() or
lower_search_query in dag.owners.lower()]) |
set(webserver_dags_filtered.keys()))
sorted_dag_ids = sorted(all_dag_ids)
else:
webserver_dags_filtered = webserver_dags
sorted_dag_ids = sorted(set(orm_dags.keys()) | set(webserver_dags.keys()))
start = current_page * dags_per_page
end = start + dags_per_page
num_of_all_dags = len(sorted_dag_ids)
page_dag_ids = sorted_dag_ids[start:end]
num_of_pages = int(math.ceil(num_of_all_dags / float(dags_per_page)))
auto_complete_data = set()
for dag in webserver_dags_filtered.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owner)
for dag in orm_dags.values():
auto_complete_data.add(dag.dag_id)
auto_complete_data.add(dag.owners)
return self.render(
'airflow/dags.html',
webserver_dags=webserver_dags_filtered,
orm_dags=orm_dags,
hide_paused=hide_paused,
current_page=current_page,
search_query=arg_search_query if arg_search_query else '',
page_size=dags_per_page,
num_of_pages=num_of_pages,
num_dag_from=start + 1,
num_dag_to=min(end, num_of_all_dags),
num_of_all_dags=num_of_all_dags,
paging=wwwutils.generate_pages(current_page, num_of_pages,
search=arg_search_query,
showPaused=not hide_paused),
dag_ids_in_page=page_dag_ids,
auto_complete_data=auto_complete_data)
class QueryView(wwwutils.DataProfilingMixin, BaseView):
@expose('/', methods=['POST', 'GET'])
@wwwutils.gzipped
@provide_session
def query(self, session=None):
dbs = session.query(models.Connection).order_by(
models.Connection.conn_id).all()
session.expunge_all()
db_choices = list(
((db.conn_id, db.conn_id) for db in dbs if db.get_hook()))
conn_id_str = request.form.get('conn_id')
csv = request.form.get('csv') == "true"
sql = request.form.get('sql')
class QueryForm(Form):
conn_id = SelectField("Layout", choices=db_choices)
sql = TextAreaField("SQL", widget=wwwutils.AceEditorWidget())
data = {
'conn_id': conn_id_str,
'sql': sql,
}
results = None
has_data = False
error = False
if conn_id_str:
db = [db for db in dbs if db.conn_id == conn_id_str][0]
hook = db.get_hook()
try:
df = hook.get_pandas_df(wwwutils.limit_sql(sql, QUERY_LIMIT, conn_type=db.conn_type))
# df = hook.get_pandas_df(sql)
has_data = len(df) > 0
df = df.fillna('')
results = df.to_html(
classes=[
'table', 'table-bordered', 'table-striped', 'no-wrap'],
index=False,
na_rep='',
) if has_data else ''
except Exception as e:
flash(str(e), 'error')
error = True
if has_data and len(df) == QUERY_LIMIT:
flash(
"Query output truncated at " + str(QUERY_LIMIT) +
" rows", 'info')
if not has_data and error:
flash('No data', 'error')
if csv:
return Response(
response=df.to_csv(index=False),
status=200,
mimetype="application/text")
form = QueryForm(request.form, data=data)
session.commit()
return self.render(
'airflow/query.html', form=form,
title="Ad Hoc Query",
results=results or '',
has_data=has_data)
class AirflowModelView(ModelView):
list_template = 'airflow/model_list.html'
edit_template = 'airflow/model_edit.html'
create_template = 'airflow/model_create.html'
column_display_actions = True
page_size = PAGE_SIZE
class ModelViewOnly(wwwutils.LoginMixin, AirflowModelView):
"""
Modifying the base ModelView class for non edit, browse only operations
"""
named_filter_urls = True
can_create = False
can_edit = False
can_delete = False
column_display_pk = True
class PoolModelView(wwwutils.SuperUserMixin, AirflowModelView):
column_list = ('pool', 'slots', 'used_slots', 'queued_slots')
column_formatters = dict(
pool=pool_link, used_slots=fused_slots, queued_slots=fqueued_slots)
named_filter_urls = True
form_args = {
'pool': {
'validators': [
validators.DataRequired(),
]
}
}
class SlaMissModelView(wwwutils.SuperUserMixin, ModelViewOnly):
verbose_name_plural = "SLA misses"
verbose_name = "SLA miss"
column_list = (
'dag_id', 'task_id', 'execution_date', 'email_sent', 'timestamp')
column_formatters = dict(
task_id=task_instance_link,
execution_date=datetime_f,
timestamp=datetime_f,
dag_id=dag_link)
named_filter_urls = True
column_searchable_list = ('dag_id', 'task_id',)
column_filters = (
'dag_id', 'task_id', 'email_sent', 'timestamp', 'execution_date')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'email_sent': {'disabled': True},
'timestamp': {'disabled': True},
}
@provide_session
def _connection_ids(session=None):
return [
(c.conn_id, c.conn_id)
for c in (
session.query(models.Connection.conn_id)
.group_by(models.Connection.conn_id)
)
]
class ChartModelView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "chart"
verbose_name_plural = "charts"
form_columns = (
'label',
'owner',
'conn_id',
'chart_type',
'show_datatable',
'x_is_date',
'y_log_scale',
'show_sql',
'height',
'sql_layout',
'sql',
'default_params',
)
column_list = (
'label',
'conn_id',
'chart_type',
'owner',
'last_modified',
)
column_sortable_list = (
'label',
'conn_id',
'chart_type',
('owner', 'owner.username'),
'last_modified',
)
column_formatters = dict(label=label_link, last_modified=datetime_f)
column_default_sort = ('last_modified', True)
create_template = 'airflow/chart/create.html'
edit_template = 'airflow/chart/edit.html'
column_filters = ('label', 'owner.username', 'conn_id')
column_searchable_list = ('owner.username', 'label', 'sql')
column_descriptions = {
'label': "Can include {{ templated_fields }} and {{ macros }}",
'chart_type': "The type of chart to be displayed",
'sql': "Can include {{ templated_fields }} and {{ macros }}.",
'height': "Height of the chart, in pixels.",
'conn_id': "Source database to run the query against",
'x_is_date': (
"Whether the X axis should be casted as a date field. Expect most "
"intelligible date formats to get casted properly."
),
'owner': (
"The chart's owner, mostly used for reference and filtering in "
"the list view."
),
'show_datatable':
"Whether to display an interactive data table under the chart.",
'default_params': (
'A dictionary of {"key": "values",} that define what the '
'templated fields (parameters) values should be by default. '
'To be valid, it needs to "eval" as a Python dict. '
'The key values will show up in the url\'s querystring '
'and can be altered there.'
),
'show_sql': "Whether to display the SQL statement as a collapsible "
"section in the chart page.",
'y_log_scale': "Whether to use a log scale for the Y axis.",
'sql_layout': (
"Defines the layout of the SQL that the application should "
"expect. Depending on the tables you are sourcing from, it may "
"make more sense to pivot / unpivot the metrics."
),
}
column_labels = {
'sql': "SQL",
'height': "Chart Height",
'sql_layout': "SQL Layout",
'show_sql': "Display the SQL Statement",
'default_params': "Default Parameters",
}
form_choices = {
'chart_type': [
('line', 'Line Chart'),
('spline', 'Spline Chart'),
('bar', 'Bar Chart'),
('column', 'Column Chart'),
('area', 'Overlapping Area Chart'),
('stacked_area', 'Stacked Area Chart'),
('percent_area', 'Percent Area Chart'),
('datatable', 'No chart, data table only'),
],
'sql_layout': [
('series', 'SELECT series, x, y FROM ...'),
('columns', 'SELECT x, y (series 1), y (series 2), ... FROM ...'),
],
'conn_id': _connection_ids()
}
def on_model_change(self, form, model, is_created=True):
if model.iteration_no is None:
model.iteration_no = 0
else:
model.iteration_no += 1
if not model.user_id and current_user and hasattr(current_user, 'id'):
model.user_id = current_user.id
model.last_modified = timezone.utcnow()
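# Maps the chart types stored on the Chart model to the nvd3 chart
# constructors used to render them.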
chart_mapping = (
('line', 'lineChart'),
('spline', 'lineChart'),
('bar', 'multiBarChart'),
('column', 'multiBarChart'),
('area', 'stackedAreaChart'),
('stacked_area', 'stackedAreaChart'),
('percent_area', 'stackedAreaChart'),
('datatable', 'datatable'),
)
chart_mapping = dict(chart_mapping)
class KnownEventView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "known event"
verbose_name_plural = "known events"
form_columns = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
'description',
)
form_args = {
'label': {
'validators': [
validators.DataRequired(),
],
},
'event_type': {
'validators': [
validators.DataRequired(),
],
},
'start_date': {
'validators': [
validators.DataRequired(),
],
'filters': [
parse_datetime_f,
],
},
'end_date': {
'validators': [
validators.DataRequired(),
GreaterEqualThan(fieldname='start_date'),
],
'filters': [
parse_datetime_f,
]
},
'reported_by': {
'validators': [
validators.DataRequired(),
],
}
}
column_list = (
'label',
'event_type',
'start_date',
'end_date',
'reported_by',
)
column_default_sort = ("start_date", True)
column_sortable_list = (
'label',
# todo: yes this has a spelling error
('event_type', 'event_type.know_event_type'),
'start_date',
'end_date',
('reported_by', 'reported_by.username'),
)
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(start_date=DateTimeField, end_date=DateTimeField)
class KnownEventTypeView(wwwutils.DataProfilingMixin, AirflowModelView):
pass
# NOTE: For debugging / troubleshooting
# mv = KnowEventTypeView(
# models.KnownEventType,
# Session, name="Known Event Types", category="Manage")
# admin.add_view(mv)
# class DagPickleView(SuperUserMixin, ModelView):
# pass
# mv = DagPickleView(
# models.DagPickle,
# Session, name="Pickles", category="Manage")
# admin.add_view(mv)
class VariableView(wwwutils.DataProfilingMixin, AirflowModelView):
verbose_name = "Variable"
verbose_name_plural = "Variables"
list_template = 'airflow/variable_list.html'
def hidden_field_formatter(view, context, model, name):
if wwwutils.should_hide_value_for_key(model.key):
return Markup('*' * 8)
val = getattr(model, name)
if val:
return val
else:
return Markup('<span class="label label-danger">Invalid</span>')
form_columns = (
'key',
'val',
)
column_list = ('key', 'val', 'is_encrypted',)
column_filters = ('key', 'val')
column_searchable_list = ('key', 'val', 'is_encrypted',)
column_default_sort = ('key', False)
form_widget_args = {
'is_encrypted': {'disabled': True},
'val': {
'rows': 20,
}
}
form_args = {
'key': {
'validators': {
validators.DataRequired(),
},
},
}
column_sortable_list = (
'key',
'val',
'is_encrypted',
)
column_formatters = {
'val': hidden_field_formatter,
}
# Default flask-admin export functionality doesn't handle serialized json
@action('varexport', 'Export', None)
@provide_session
def action_varexport(self, ids, session=None):
V = models.Variable
qry = session.query(V).filter(V.id.in_(ids)).all()
var_dict = {}
d = json.JSONDecoder()
for var in qry:
val = None
try:
val = d.decode(var.val)
            except Exception:
val = var.val
var_dict[var.key] = val
response = make_response(json.dumps(var_dict, sort_keys=True, indent=4))
response.headers["Content-Disposition"] = "attachment; filename=variables.json"
return response
def on_form_prefill(self, form, id):
if wwwutils.should_hide_value_for_key(form.key.data):
form.val.data = '*' * 8
class XComView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "XCom"
verbose_name_plural = "XComs"
form_columns = (
'key',
'value',
'execution_date',
'task_id',
'dag_id',
)
form_extra_fields = {
'value': StringField('Value'),
}
form_args = {
'execution_date': {
'filters': [
parse_datetime_f,
]
}
}
column_filters = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
column_searchable_list = ('key', 'timestamp', 'execution_date', 'task_id', 'dag_id')
filter_converter = wwwutils.UtcFilterConverter()
form_overrides = dict(execution_date=DateTimeField)
class JobModelView(ModelViewOnly):
verbose_name_plural = "jobs"
verbose_name = "job"
column_display_actions = False
column_default_sort = ('start_date', True)
column_filters = (
'job_type', 'dag_id', 'state',
'unixname', 'hostname', 'start_date', 'end_date', 'latest_heartbeat')
column_formatters = dict(
start_date=datetime_f,
end_date=datetime_f,
hostname=nobr_f,
state=state_f,
latest_heartbeat=datetime_f)
filter_converter = wwwutils.UtcFilterConverter()
class DagRunModelView(ModelViewOnly):
verbose_name_plural = "DAG Runs"
can_edit = True
can_create = True
column_editable_list = ('state',)
verbose_name = "dag run"
column_default_sort = ('execution_date', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
form_args = dict(
dag_id=dict(validators=[validators.DataRequired()])
)
column_list = (
'state', 'dag_id', 'execution_date', 'run_id', 'external_trigger')
column_filters = column_list
filter_converter = wwwutils.UtcFilterConverter()
column_searchable_list = ('dag_id', 'state', 'run_id')
column_formatters = dict(
execution_date=datetime_f,
state=state_f,
start_date=datetime_f,
dag_id=dag_link,
run_id=dag_run_link
)
@action('new_delete', "Delete", "Are you sure you want to delete selected records?")
@provide_session
def action_new_delete(self, ids, session=None):
deleted = set(session.query(models.DagRun)
.filter(models.DagRun.id.in_(ids))
.all())
session.query(models.DagRun) \
.filter(models.DagRun.id.in_(ids)) \
.delete(synchronize_session='fetch')
session.commit()
dirty_ids = []
for row in deleted:
dirty_ids.append(row.dag_id)
models.DagStat.update(dirty_ids, dirty_only=False, session=session)
@action('set_running', "Set state to 'running'", None)
@provide_session
def action_set_running(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
models.DagStat.update(dirty_ids, session=session)
flash(
"{count} dag runs were set to running".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_failed', "Set state to 'failed'",
"All running task instances would also be marked as failed, are you sure?")
@provide_session
def action_set_failed(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_failed(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to failed".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
@action('set_success', "Set state to 'success'",
"All task instances would also be marked as success, are you sure?")
@provide_session
def action_set_success(self, ids, session=None):
try:
DR = models.DagRun
count = 0
dirty_ids = []
altered_tis = []
for dr in session.query(DR).filter(DR.id.in_(ids)).all():
dirty_ids.append(dr.dag_id)
count += 1
altered_tis += \
set_dag_run_state_to_success(dagbag.get_dag(dr.dag_id),
dr.execution_date,
commit=True,
session=session)
models.DagStat.update(dirty_ids, session=session)
altered_ti_count = len(altered_tis)
flash(
"{count} dag runs and {altered_ti_count} task instances "
"were set to success".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
# Called after editing DagRun model in the UI.
@provide_session
def after_model_change(self, form, dagrun, is_created, session=None):
altered_tis = []
if dagrun.state == State.SUCCESS:
altered_tis = set_dag_run_state_to_success(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True)
elif dagrun.state == State.FAILED:
altered_tis = set_dag_run_state_to_failed(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
elif dagrun.state == State.RUNNING:
altered_tis = set_dag_run_state_to_running(
dagbag.get_dag(dagrun.dag_id),
dagrun.execution_date,
commit=True,
session=session)
altered_ti_count = len(altered_tis)
models.DagStat.update([dagrun.dag_id], session=session)
flash(
"1 dag run and {altered_ti_count} task instances "
"were set to '{dagrun.state}'".format(**locals()))
class LogModelView(ModelViewOnly):
verbose_name_plural = "logs"
verbose_name = "log"
column_display_actions = False
column_default_sort = ('dttm', True)
column_filters = ('dag_id', 'task_id', 'execution_date', 'extra')
filter_converter = wwwutils.UtcFilterConverter()
column_formatters = dict(
dttm=datetime_f, execution_date=datetime_f, dag_id=dag_link)
class TaskInstanceModelView(ModelViewOnly):
verbose_name_plural = "task instances"
verbose_name = "task instance"
column_filters = (
'state', 'dag_id', 'task_id', 'execution_date', 'hostname',
'queue', 'pool', 'operator', 'start_date', 'end_date')
filter_converter = wwwutils.UtcFilterConverter()
named_filter_urls = True
column_formatters = dict(
log_url=log_url_formatter,
task_id=task_instance_link,
hostname=nobr_f,
state=state_f,
execution_date=datetime_f,
start_date=datetime_f,
end_date=datetime_f,
queued_dttm=datetime_f,
dag_id=dag_link,
run_id=dag_run_link,
duration=duration_f)
column_searchable_list = ('dag_id', 'task_id', 'state')
column_default_sort = ('job_id', True)
form_choices = {
'state': [
('success', 'success'),
('running', 'running'),
('failed', 'failed'),
],
}
column_list = (
'state', 'dag_id', 'task_id', 'execution_date', 'operator',
'start_date', 'end_date', 'duration', 'job_id', 'hostname',
'unixname', 'priority_weight', 'queue', 'queued_dttm', 'try_number',
'pool', 'log_url')
page_size = PAGE_SIZE
@action('set_running', "Set state to 'running'", None)
def action_set_running(self, ids):
self.set_task_instance_state(ids, State.RUNNING)
@action('set_failed', "Set state to 'failed'", None)
def action_set_failed(self, ids):
self.set_task_instance_state(ids, State.FAILED)
@action('set_success', "Set state to 'success'", None)
def action_set_success(self, ids):
self.set_task_instance_state(ids, State.SUCCESS)
@action('set_retry', "Set state to 'up_for_retry'", None)
def action_set_retry(self, ids):
self.set_task_instance_state(ids, State.UP_FOR_RETRY)
@provide_session
@action('clear',
lazy_gettext('Clear'),
lazy_gettext(
'Are you sure you want to clear the state of the selected task instance(s)'
' and set their dagruns to the running state?'))
def action_clear(self, ids, session=None):
try:
TI = models.TaskInstance
dag_to_task_details = {}
dag_to_tis = {}
# Collect dags upfront as dagbag.get_dag() will reset the session
for id_str in ids:
task_id, dag_id, execution_date = iterdecode(id_str)
dag = dagbag.get_dag(dag_id)
task_details = dag_to_task_details.setdefault(dag, [])
task_details.append((task_id, execution_date))
for dag, task_details in dag_to_task_details.items():
for task_id, execution_date in task_details:
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag.dag_id,
TI.execution_date == execution_date).one()
tis = dag_to_tis.setdefault(dag, [])
tis.append(ti)
for dag, tis in dag_to_tis.items():
models.clear_task_instances(tis, session, dag=dag)
session.commit()
flash("{0} task instances have been cleared".format(len(ids)))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to clear task instances', 'error')
@provide_session
def set_task_instance_state(self, ids, target_state, session=None):
try:
TI = models.TaskInstance
count = len(ids)
for id in ids:
task_id, dag_id, execution_date = iterdecode(id)
execution_date = parse_execution_date(execution_date)
ti = session.query(TI).filter(TI.task_id == task_id,
TI.dag_id == dag_id,
TI.execution_date == execution_date).one()
ti.state = target_state
session.commit()
flash(
"{count} task instances were set to '{target_state}'".format(**locals()))
except Exception as ex:
if not self.handle_view_exception(ex):
raise Exception("Ooops")
flash('Failed to set state', 'error')
def get_one(self, id):
"""
As a workaround for AIRFLOW-252, this method overrides Flask-Admin's ModelView.get_one().
TODO: this method should be removed once the below bug is fixed on Flask-Admin side.
https://github.com/flask-admin/flask-admin/issues/1226
"""
task_id, dag_id, execution_date = iterdecode(id)
execution_date = pendulum.parse(execution_date)
return self.session.query(self.model).get((task_id, dag_id, execution_date))
class ConnectionModelView(wwwutils.SuperUserMixin, AirflowModelView):
create_template = 'airflow/conn_create.html'
edit_template = 'airflow/conn_edit.html'
list_template = 'airflow/conn_list.html'
form_columns = (
'conn_id',
'conn_type',
'host',
'schema',
'login',
'password',
'port',
'extra',
'extra__jdbc__drv_path',
'extra__jdbc__drv_clsname',
'extra__google_cloud_platform__project',
'extra__google_cloud_platform__key_path',
'extra__google_cloud_platform__keyfile_dict',
'extra__google_cloud_platform__scope',
)
verbose_name = "Connection"
verbose_name_plural = "Connections"
column_default_sort = ('conn_id', False)
column_list = ('conn_id', 'conn_type', 'host', 'port', 'is_encrypted', 'is_extra_encrypted',)
form_overrides = dict(_password=PasswordField, _extra=TextAreaField)
form_widget_args = {
'is_extra_encrypted': {'disabled': True},
'is_encrypted': {'disabled': True},
}
# Used to customized the form, the forms elements get rendered
# and results are stored in the extra field as json. All of these
# need to be prefixed with extra__ and then the conn_type ___ as in
# extra__{conn_type}__name. You can also hide form elements and rename
# others from the connection_form.js file
form_extra_fields = {
'extra__jdbc__drv_path': StringField('Driver Path'),
'extra__jdbc__drv_clsname': StringField('Driver Class'),
'extra__google_cloud_platform__project': StringField('Project Id'),
'extra__google_cloud_platform__key_path': StringField('Keyfile Path'),
'extra__google_cloud_platform__keyfile_dict': PasswordField('Keyfile JSON'),
'extra__google_cloud_platform__scope': StringField('Scopes (comma separated)'),
}
form_choices = {
'conn_type': models.Connection._types
}
def on_model_change(self, form, model, is_created):
formdata = form.data
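        # Collect the typed extra__* fields and persist them as a single
        # JSON blob in the connection's 'extra' column.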
if formdata['conn_type'] in ['jdbc', 'google_cloud_platform']:
extra = {
key: formdata[key]
for key in self.form_extra_fields.keys() if key in formdata}
model.extra = json.dumps(extra)
@classmethod
def alert_fernet_key(cls):
fk = None
try:
fk = conf.get('core', 'fernet_key')
        except Exception:
pass
return fk is None
@classmethod
def is_secure(cls):
"""
Used to display a message in the Connection list view making it clear
that the passwords and `extra` field can't be encrypted.
"""
is_secure = False
try:
import cryptography
conf.get('core', 'fernet_key')
is_secure = True
        except Exception:
pass
return is_secure
def on_form_prefill(self, form, id):
try:
d = json.loads(form.data.get('extra', '{}'))
except Exception:
d = {}
for field in list(self.form_extra_fields.keys()):
value = d.get(field, '')
if value:
field = getattr(form, field)
field.data = value
class UserModelView(wwwutils.SuperUserMixin, AirflowModelView):
verbose_name = "User"
verbose_name_plural = "Users"
column_default_sort = 'username'
class VersionView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def version(self):
# Look at the version from setup.py
try:
airflow_version = pkg_resources.require("apache-airflow")[0].version
except Exception as e:
airflow_version = None
logging.error(e)
# Get the Git repo and git hash
git_version = None
try:
with open(os.path.join(*[settings.AIRFLOW_HOME, 'airflow', 'git_version'])) as f:
git_version = f.readline()
except Exception as e:
logging.error(e)
# Render information
title = "Version Info"
return self.render('airflow/version.html',
title=title,
airflow_version=airflow_version,
git_version=git_version)
class ConfigurationView(wwwutils.SuperUserMixin, BaseView):
@expose('/')
def conf(self):
raw = request.args.get('raw') == "true"
title = "Airflow Configuration"
subtitle = conf.AIRFLOW_CONFIG
if conf.getboolean("webserver", "expose_config"):
with open(conf.AIRFLOW_CONFIG, 'r') as f:
config = f.read()
table = [(section, key, value, source)
for section, parameters in conf.as_dict(True, True).items()
for key, (value, source) in parameters.items()]
else:
config = (
"# Your Airflow administrator chose not to expose the "
"configuration, most likely for security reasons.")
table = None
if raw:
return Response(
response=config,
status=200,
mimetype="application/text")
else:
code_html = Markup(highlight(
config,
lexers.IniLexer(), # Lexer call
HtmlFormatter(noclasses=True))
)
return self.render(
'airflow/config.html',
pre_subtitle=settings.HEADER + " v" + airflow.__version__,
code_html=code_html, title=title, subtitle=subtitle,
table=table)
class DagModelView(wwwutils.SuperUserMixin, ModelView):
column_list = ('dag_id', 'owners')
column_editable_list = ('is_paused',)
form_excluded_columns = ('is_subdag', 'is_active')
column_searchable_list = ('dag_id',)
column_filters = (
'dag_id', 'owners', 'is_paused', 'is_active', 'is_subdag',
'last_scheduler_run', 'last_expired')
filter_converter = wwwutils.UtcFilterConverter()
form_widget_args = {
'last_scheduler_run': {'disabled': True},
'fileloc': {'disabled': True},
'is_paused': {'disabled': True},
'last_pickled': {'disabled': True},
'pickle_id': {'disabled': True},
'last_loaded': {'disabled': True},
'last_expired': {'disabled': True},
'pickle_size': {'disabled': True},
'scheduler_lock': {'disabled': True},
'owners': {'disabled': True},
}
column_formatters = dict(
dag_id=dag_link,
)
can_delete = False
can_create = False
page_size = PAGE_SIZE
list_template = 'airflow/list_dags.html'
named_filter_urls = True
def get_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_query()
.filter(or_(models.DagModel.is_active, models.DagModel.is_paused))
.filter(~models.DagModel.is_subdag)
)
def get_count_query(self):
"""
Default filters for model
"""
return (
super(DagModelView, self)
.get_count_query()
.filter(models.DagModel.is_active)
.filter(~models.DagModel.is_subdag)
)
| apache-2.0 |
chenyyx/scikit-learn-doc-zh | examples/en/neighbors/plot_digits_kde_sampling.py | 108 | 2026 | """
=========================
Kernel Density Estimation
=========================
This example shows how kernel density estimation (KDE), a powerful
non-parametric density estimation technique, can be used to learn
a generative model for a dataset. With this generative model in place,
new samples can be drawn. These new samples reflect the underlying model
of the data.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.neighbors import KernelDensity
from sklearn.decomposition import PCA
from sklearn.model_selection import GridSearchCV
# load the data
digits = load_digits()
data = digits.data
# project the 64-dimensional data to a lower dimension
pca = PCA(n_components=15, whiten=False)
data = pca.fit_transform(digits.data)
# use grid search cross-validation to optimize the bandwidth
params = {'bandwidth': np.logspace(-1, 1, 20)}
grid = GridSearchCV(KernelDensity(), params)
grid.fit(data)
print("best bandwidth: {0}".format(grid.best_estimator_.bandwidth))
# use the best estimator to compute the kernel density estimate
kde = grid.best_estimator_
# sample 44 new points from the data
new_data = kde.sample(44, random_state=0)
new_data = pca.inverse_transform(new_data)
# turn data into a 4x11 grid
new_data = new_data.reshape((4, 11, -1))
real_data = digits.data[:44].reshape((4, 11, -1))
# plot real digits and resampled digits
fig, ax = plt.subplots(9, 11, subplot_kw=dict(xticks=[], yticks=[]))
for j in range(11):
ax[4, j].set_visible(False)
for i in range(4):
im = ax[i, j].imshow(real_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
im = ax[i + 5, j].imshow(new_data[i, j].reshape((8, 8)),
cmap=plt.cm.binary, interpolation='nearest')
im.set_clim(0, 16)
ax[0, 5].set_title('Selection from the input data')
ax[5, 5].set_title('"New" digits drawn from the kernel density model')
plt.show()
| gpl-3.0 |
mbkumar/pymatgen | setup.py | 1 | 9254 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
"""Setup.py for pymatgen."""
import sys
import platform
from setuptools import setup, find_packages, Extension
from setuptools.command.build_ext import build_ext as _build_ext
class build_ext(_build_ext):
"""Extension builder that checks for numpy before install."""
def finalize_options(self):
"""Override finalize_options."""
_build_ext.finalize_options(self)
# Prevent numpy from thinking it is still in its setup process:
import builtins
if hasattr(builtins, '__NUMPY_SETUP__'):
del builtins.__NUMPY_SETUP__
import importlib
import numpy
importlib.reload(numpy)
self.include_dirs.append(numpy.get_include())
extra_link_args = []
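# Extra linker flags for the C/C++ extensions; 64-bit Windows builds need
# duplicate symbol definitions to be allowed.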
if sys.platform.startswith('win') and platform.machine().endswith('64'):
extra_link_args.append('-Wl,--allow-multiple-definition')
cpp_extra_link_args = extra_link_args
cpp_extra_compile_args = ["-Wno-cpp", "-Wno-unused-function", "-O2", "-march=native", '-std=c++0x']
if sys.platform.startswith('darwin'):
cpp_extra_compile_args.append("-stdlib=libc++")
cpp_extra_link_args = ["-O2", "-march=native", '-stdlib=libc++']
# https://docs.microsoft.com/en-us/cpp/build/reference/compiler-options-listed-alphabetically?view=vs-2017
if sys.platform.startswith('win'):
cpp_extra_compile_args = ['/w', '/O2', '/std:c++0x']
cpp_extra_link_args = extra_link_args
long_desc = """
Official docs: [http://pymatgen.org](http://pymatgen.org/)
Pymatgen (Python Materials Genomics) is a robust, open-source Python library
for materials analysis. These are some of the main features:
1. Highly flexible classes for the representation of Element, Site, Molecule,
Structure objects.
2. Extensive input/output support, including support for
[VASP](http://cms.mpi.univie.ac.at/vasp/), [ABINIT](http://www.abinit.org/),
CIF, Gaussian, XYZ, and many other file formats.
3. Powerful analysis tools, including generation of phase diagrams, Pourbaix
diagrams, diffusion analyses, reactions, etc.
4. Electronic structure analyses, such as density of states and band structure.
5. Integration with the Materials Project REST API.
Pymatgen is free to use. However, we also welcome your help to improve this
library by making your own contributions. These contributions can be in the
form of additional tools or modules you develop, or feature requests and bug
reports. Please report any bugs and issues at pymatgen's [Github page]
(https://github.com/materialsproject/pymatgen). For help with any pymatgen
issues, please use the [Discourse page](https://discuss.matsci.org/c/pymatgen).
Why use pymatgen?
=================
There are many materials analysis codes out there, both commercial and free,
but pymatgen offers several advantages:
1. **It is (fairly) robust.** Pymatgen is used by thousands of researchers,
and is the analysis code powering the [Materials Project](https://www.materialsproject.org).
The analysis it produces survives rigorous scrutiny every single day. Bugs
tend to be found and corrected quickly. Pymatgen also uses
[CircleCI](https://circleci.com) and [Appveyor](https://www.appveyor.com/)
for continuous integration on the Linux and Windows platforms,
respectively, which ensures that every commit passes a comprehensive suite
of unittests.
2. **It is well documented.** A fairly comprehensive documentation has been
written to help you get to grips with it quickly.
3. **It is open.** You are free to use and contribute to pymatgen. It also means
that pymatgen is continuously being improved. We will attribute any code you
contribute to any publication you specify. Contributing to pymatgen means
your research becomes more visible, which translates to greater impact.
4. **It is fast.** Many of the core numerical methods in pymatgen have been
optimized by vectorizing in numpy/scipy. This means that coordinate
manipulations are extremely fast and are in fact comparable to codes
written in other languages. Pymatgen also comes with a complete system for
handling periodic boundary conditions.
5. **It will be around.** Pymatgen is not a pet research project. It is used in
the well-established Materials Project. It is also actively being developed
and maintained by the [Materials Virtual Lab](https://www.materialsvirtuallab.org),
the ABINIT group and many other research groups.
With effect from version 2019.1.1, pymatgen only supports Python 3.x. Users
who require Python 2.7 should install pymatgen v2018.x.
"""
setup(
name="pymatgen",
packages=find_packages(),
version="2020.7.3",
cmdclass={'build_ext': build_ext},
setup_requires=['numpy>=1.14.3', 'setuptools>=18.0'],
python_requires='>=3.6',
install_requires=["numpy>=1.14.3", "requests", "ruamel.yaml>=0.15.6",
"monty>=3.0.2", "scipy>=1.5.0",
"tabulate", "spglib>=1.9.9.44", "networkx>=2.2",
"matplotlib>=1.5", "palettable>=3.1.1", "sympy", "pandas",
"plotly>=4.5.0"],
extras_require={
"provenance": ["pybtex"],
"ase": ["ase>=3.3"],
"vis": ["vtk>=6.0.0"],
"abinit": ["netcdf4"],
':python_version < "3.7"': [
"dataclasses>=0.6",
]},
package_data={
"pymatgen.core": ["*.json", "py.typed"],
"pymatgen.analysis": ["*.yaml", "*.json", "*.csv"],
"pymatgen.analysis.chemenv.coordination_environments.coordination_geometries_files": ["*.txt", "*.json"],
"pymatgen.analysis.chemenv.coordination_environments.strategy_files": ["*.json"],
"pymatgen.analysis.magnetism": ["*.json", "*.yaml"],
"pymatgen.analysis.structure_prediction": ["data/*.json", "*.yaml"],
"pymatgen.io": ["*.yaml"],
"pymatgen.io.vasp": ["*.yaml", "*.json"],
"pymatgen.io.lammps": ["templates/*.*", "*.yaml"],
"pymatgen.io.lobster": ["lobster_basis/*.yaml"],
"pymatgen.io.feff": ["*.yaml"],
"pymatgen.symmetry": ["*.yaml", "*.json", "*.sqlite"],
"pymatgen.entries": ["*.yaml"],
"pymatgen.vis": ["ElementColorSchemes.yaml"],
"pymatgen.command_line": ["OxideTersoffPotentials"],
"pymatgen.analysis.defects": ["*.json"],
"pymatgen.analysis.diffraction": ["*.json"],
"pymatgen.util": ["structures/*.json"]},
author="Pymatgen Development Team",
author_email="ongsp@eng.ucsd.edu",
maintainer="Shyue Ping Ong, Matthew Horton",
maintainer_email="ongsp@eng.ucsd.edu, mkhorton@lbl.gov",
url="http://www.pymatgen.org",
license="MIT",
description="Python Materials Genomics is a robust materials "
"analysis code that defines core object representations for "
"structures and molecules with support for many electronic "
"structure codes. It is currently the core analysis code "
"powering the Materials Project "
"(https://www.materialsproject.org).",
long_description=long_desc,
long_description_content_type='text/markdown',
keywords=["VASP", "gaussian", "ABINIT", "nwchem", "qchem", "materials", "science",
"project", "electronic", "structure", "analysis", "phase", "diagrams",
"crystal"],
classifiers=[
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Development Status :: 4 - Beta",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Information Analysis",
"Topic :: Scientific/Engineering :: Physics",
"Topic :: Scientific/Engineering :: Chemistry",
"Topic :: Software Development :: Libraries :: Python Modules"
],
ext_modules=[Extension("pymatgen.optimization.linear_assignment",
["pymatgen/optimization/linear_assignment.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.util.coord_cython",
["pymatgen/util/coord_cython.c"],
extra_link_args=extra_link_args),
Extension("pymatgen.optimization.neighbors",
["pymatgen/optimization/neighbors.cpp"],
extra_compile_args=cpp_extra_compile_args,
extra_link_args=cpp_extra_link_args,
language='c++')],
entry_points={
'console_scripts': [
'pmg = pymatgen.cli.pmg:main',
'feff_input_generation = pymatgen.cli.feff_input_generation:main',
'feff_plot_cross_section = pymatgen.cli.feff_plot_cross_section:main',
'feff_plot_dos = pymatgen.cli.feff_plot_dos:main',
'gaussian_analyzer = pymatgen.cli.gaussian_analyzer:main',
'get_environment = pymatgen.cli.get_environment:main',
]
}
) | mit |
ronojoy/BDA_py_demos | demos_ch5/demo5_2.py | 19 | 3326 | """Bayesian Data Analysis, 3rd ed
Chapter 5, demo 2
Hierarchical model for SAT-example data (BDA3, p. 102)
"""
from __future__ import division
import numpy as np
from scipy.stats import norm
import scipy.io # For importing a matlab file
import matplotlib.pyplot as plt
# Edit default plot settings (colours from colorbrewer2.org)
plt.rc('font', size=14)
plt.rc('lines', color='#377eb8', linewidth=2)
plt.rc('axes', color_cycle=(plt.rcParams['lines.color'],)) # Disable color cycle
# SAT-example data (BDA3 p. 120)
# y is the estimated treatment effect
# s is the standard error of effect estimate
y = np.array([28, 8, -3, 7, -1, 1, 18, 12])
s = np.array([15, 10, 16, 11, 9, 11, 10, 18])
M = len(y)
# load the pre-computed results for the hierarchical model
# replace this with your own code in Ex 5.1*
hres_path = '../utilities_and_data/demo5_2.mat'
hres = scipy.io.loadmat(hres_path)
''' Content information of the precalculated results:
>>> scipy.io.whosmat('demo5_2.mat')
[('pxm', (8, 500), 'double'),
('t', (1, 1000), 'double'),
('tp', (1, 1000), 'double'),
('tsd', (8, 1000), 'double'),
('tm', (8, 1000), 'double')]
'''
pxm = hres['pxm']
t = hres['t'][0]
tp = hres['tp'][0]
tsd = hres['tsd']
tm = hres['tm']
# plot the separate, pooled and hierarchical models
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
x = np.linspace(-40, 60, 500)
# separate
lines = axes[0].plot(x, norm.pdf(x[:,None], y[1:], s[1:]), linewidth=1)
line, = axes[0].plot(x, norm.pdf(x, y[0], s[0]), 'r')
axes[0].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[0].set_yticks(())
axes[0].set_title('separate model')
# pooled
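# pooled estimate: inverse-variance weighted mean of the effects,
# with sd = sqrt(1 / sum(1 / s_j^2))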
axes[1].plot(
x,
norm.pdf(
x,
np.sum(y/s**2)/np.sum(1/s**2),
np.sqrt(1/np.sum(1/s**2))
),
label='All schools'
)
axes[1].legend(loc='upper left')
axes[1].set_yticks(())
axes[1].set_title('pooled model')
# hierarchical
lines = axes[2].plot(x, pxm[1:].T, linewidth=1)
line, = axes[2].plot(x, pxm[0], 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_yticks(())
axes[2].set_title('hierarchical model')
axes[2].set_xlabel('Treatment effect')
# plot various marginal and conditional posterior summaries
fig, axes = plt.subplots(3, 1, sharex=True, figsize=(8,10))
axes[0].plot(t, tp)
axes[0].set_yticks(())
axes[0].set_title(r'marginal posterior density $p(\tau|y)$')
axes[0].set_ylabel(r'$p(\tau|y)$', fontsize=20)
axes[0].set_xlim([0,35])
lines = axes[1].plot(t, tm[1:].T, linewidth=1)
line, = axes[1].plot(t, tm[0].T, 'r')
axes[1].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[1].set_title(r'conditional posterior means of effects '
r'$\operatorname{E}(\theta_j|\tau,y)$')
axes[1].set_ylabel(r'$\operatorname{E}(\theta_j|\tau,y)$', fontsize=20)
lines = axes[2].plot(t, tsd[1:].T, linewidth=1)
line, = axes[2].plot(t, tsd[0].T, 'r')
axes[2].legend((line, lines[1]), ('school A', 'other schools'),
loc='upper left')
axes[2].set_title(r'standard deviations of effects '
r'$\operatorname{sd}(\theta_j|\tau,y)$')
axes[2].set_ylabel(r'$\operatorname{sd}(\theta_j|\tau,y)$', fontsize=20)
axes[2].set_xlabel(r'$\tau$', fontsize=20)
plt.show()
| gpl-3.0 |
pmeier82/SpikePlot | spikeplot/plot_xvf_tensor.py | 1 | 3432 | # -*- coding: utf-8 -*-
#
# spikeplot - plot_xvf_tensor.py
#
# Philipp Meier <pmeier82 at googlemail dot com>
# 2011-09-29
#
"""plot the xi vs f tensor in a grid"""
__docformat__ = 'restructuredtext'
__all__ = ['xvf_tensor']
##---IMPORTS
from .common import save_figure, check_plotting_handle, plt
##---FUNCTION
def xvf_tensor(data, nc=4, data_trans=None, plot_handle=None,
title='Xi vs F Tensor', filename=None, show=True):
"""plots xcorrs tensor for a templates-filter set
:Parameters:
# xvf_tensor parameters
data : list
List holding [templates, filters, xvft]. Templates and filters
            are in the channel-concatenated representation. xvft has
            dimensions [templates, filters, time].
nc : int
Channel count for templates, and filters.
data_trans : func
If not None, it has to be a data transformation function or lambda
that can be applied to the xvf tensor data.
# plot parameters
plot_handle : figure or axis
A reference to a figure or axis, or None if one has to be created.
title : str
A title for the plot. No title if None or ''.
filename : str
If given and a valid path on the local system, save the figure.
show : bool
If True, show the figure.
:Returns:
matplotlib.figure
            Reference to the figure plotted on
matplotlib.axis
Reference to the axis plotted on
"""
# checks
fig = check_plotting_handle(plot_handle, create_ax=False)[0]
fig.clear()
if not isinstance(data, list):
raise TypeError('data expected to be a list of ndarrays: '
'[templates, filters,xvf-tensor data]')
if len(data) != 3:
raise ValueError('data expected to be a list of ndarrays: '
'[templates, filters,xvf-tensor data]')
temps, filts, xvft = data
if temps.shape != filts.shape:
raise ValueError('inconsistent shapes for templates and filters')
nitem = temps.shape[0]
# apply data transformation
if data_trans is not None:
xvft = data_trans(xvft)
# produce plot
n1 = nitem + 1
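    # Layout: an (nitem+1) x (nitem+1) grid; the first row holds the
    # templates, the first column the filters, and the interior cells the
    # corresponding cross-correlation traces.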
fmin, fmax = filts.min() * 1.1, filts.max() * 1.1
xmin, xmax = temps.min() * 1.1, temps.max() * 1.1
xvftmin, xvftmax = xvft.min() * 1.1, xvft.max() * 1.1
for j in xrange(nitem):
# j-th filter
ax_fj = fig.add_subplot(n1, n1, n1 * (j + 1) + 1)
ax_fj.plot(filts[j])
ax_fj.set_ylim(fmin, fmax)
ax_fj.set_xlim((0, temps[0].size))
# j-th xi
ax_uj = fig.add_subplot(n1, n1, j + 2)
ax_uj.plot(temps[j])
ax_uj.set_ylim(xmin, xmax)
ax_uj.set_xlim((0, temps[0].size))
# xcorrs
for i in xrange(nitem):
# the filter output of the j-th filter with the i-th unit
ax_xcij = fig.add_subplot(n1, n1, n1 * (j + 1) + i + 2)
ax_xcij.plot(xvft[i, j, :])
ax_xcij.set_ylim(xvftmin, xvftmax)
ax_xcij.set_xlim((0, xvft[i, j, :].size))
# fancy stuff
if title is not None:
fig.suptitle(title)
    # save and/or show the figure
if filename is not None:
save_figure(fig, filename, '')
if show is True:
plt.show()
# return
return fig
##---MAIN
if __name__ == '__main__':
pass
| mit |
esteinig/netviewP | program/linux/0.7/netview.py | 1 | 28716 | #!/usr/bin/env python
# NetView P v.0.7 - Linux
# Dependencies: PLINK
# Eike Steinig
# Zenger Lab, JCU
# https://github.com/esteinig/netview
import os
import time
import shutil
import argparse
import subprocess
import numpy as np
import multiprocessing as mp
import scipy.sparse.csgraph as csg
import scipy.spatial.distance as sd
from sklearn.neighbors import NearestNeighbors
def main():
commands = CommandLine()
dat = Data()
dat.prefix = commands.arg_dict['prefix']
dat.ploidy = commands.arg_dict['ploidy']
dat.missing = commands.arg_dict['missing']
if commands.arg_dict['visual']:
print('\nGenerated node attribute files only.\n')
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
dat.writeData(f='attributes')
makeProject(commands.arg_dict['project'] + '_attributes', commands.arg_dict['prefix'])
exit(1)
print()
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + " NETVIEW P v.0.7 ")
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "File =", commands.arg_dict['data_file'].upper())
if commands.arg_dict['plink']:
dat.filetype = 'plink'
dat.readData(commands.arg_dict['data_file'], f='plink', sep=commands.arg_dict['sep'])
elif commands.arg_dict['snps']:
dat.filetype = 'snps'
dat.readData(commands.arg_dict['data_file'], f='snp_mat', sep=commands.arg_dict['sep'])
else:
dat.filetype = 'dist'
dat.readData(commands.arg_dict['data_file'], f='matrix', sep=commands.arg_dict['sep'])
dat.readData(commands.arg_dict['attribute_file'], f='attributes', sep=',')
if dat.ploidy == 'diploid':
nsnp = dat.nSNP//2
else:
nsnp = dat.nSNP
print(get_time() + "\t" + "N =", str(dat.n).upper())
print(get_time() + "\t" + "SNPs =", str(nsnp).upper())
print(get_time() + "\t" + "Ploidy =", dat.ploidy.upper())
print(get_time() + "\t" + "---------------------------------")
print(get_time() + "\t" + "Quality Control =", str(commands.arg_dict['qc']).upper())
pipeline = Analysis(dat)
qc = False
if commands.arg_dict['qc'] and pipeline.data.filetype != 'dist':
qc_params = {'--mind': commands.arg_dict['mind'],
'--geno': commands.arg_dict['geno'],
'--maf': commands.arg_dict['maf'],
'--hwe': commands.arg_dict['hwe']}
pipeline.runPLINK(qc_parameters=qc_params, quality=True)
qc = True
if commands.arg_dict['mat'] and pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.data.writeData(file=commands.arg_dict['prefix'] + '_mat.dist', f='matrix')
makeProject(commands.arg_dict['project'] + '_dist', commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
exit(1)
elif commands.arg_dict['mat'] and pipeline.data.filetype == 'dist':
print('\nError. Input is already a Distance Matrix.\n')
exit(1)
if not commands.arg_dict['off']:
if pipeline.data.filetype != 'dist':
pipeline.getDistance(distance=commands.arg_dict['distance'])
pipeline.runNetView(tree=commands.arg_dict['tree'], start=commands.arg_dict['start'],
stop=commands.arg_dict['stop'], step=commands.arg_dict['step'],
algorithm=commands.arg_dict['algorithm'])
if qc:
pipeline.updateNodeAttributes(commands.arg_dict['attribute_file'])
pipeline.data.writeData(f='attributes')
makeProject(commands.arg_dict['project'], commands.arg_dict['prefix'])
print(get_time() + "\t" + "---------------------------------\n")
def makeProject(project, prefix):
cwd = os.getcwd()
project_path = os.path.realpath(os.path.join(os.getcwd(), project))
plink_path = os.path.realpath(os.path.join(project_path, 'plink'))
network_path = os.path.realpath(os.path.join(project_path, 'networks'))
other_path = os.path.realpath(os.path.join(project_path, 'other'))
node_path = os.path.realpath(os.path.join(project_path, 'nodes'))
if os.path.exists(project_path):
shutil.rmtree(project_path)
architecture = [project_path, plink_path, network_path, other_path, node_path]
for directory in architecture:
try:
os.makedirs(directory)
except OSError:
if not os.path.isdir(directory):
raise
for name in os.listdir(cwd):
if name.endswith('.edges'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, network_path)
if name.endswith('.dist'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, other_path)
if name.endswith('.nat'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, node_path)
elif name.startswith(prefix + '_plink_in'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
os.remove(pathname)
elif name.startswith(prefix + '_plink'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, plink_path)
elif name.endswith('_qc.csv'):
pathname = os.path.join(cwd, name)
if os.path.isfile(pathname):
shutil.move(pathname, other_path)
#### Functions for Multiprocessing ####
def netview(matrix, k, mst, algorithm, tree):
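    # Construct the mutual k-nearest-neighbour graph for a single value of k:
    # an edge is kept only if both samples list each other among their k
    # nearest neighbours. If `tree` is True, the minimum spanning tree is
    # superimposed so the network stays connected. Returns k together with
    # the edge list, edge weights, adjacency matrix and MST edge positions.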
nbrs = NearestNeighbors(n_neighbors=k+1, algorithm=algorithm).fit(matrix)
adj_knn = nbrs.kneighbors_graph(matrix).toarray()
np.fill_diagonal(adj_knn, 0)
adj_mknn = (adj_knn == adj_knn.T) * adj_knn
if tree:
adj = mst + adj_mknn
else:
adj = adj_mknn
adjacency = np.tril(adj)
mst_edges = np.argwhere(adjacency < 1)
adjacency[adjacency > 0] = 1.
edges = np.argwhere(adjacency != 0)
weights = matrix[edges[:, 0], edges[:, 1]]
return [k, edges, weights, adjacency, mst_edges]
def netview_callback(result):
    print(get_time() + "\t" + ' k=' + str(result[0]))
def get_time():
return time.strftime("[%H:%M:%S]")
#### Command Line Module ####
class CommandLine:
def __init__(self):
self.parser = argparse.ArgumentParser(description='NetView P v0.7', add_help=True)
self.setParser()
self.args = self.parser.parse_args()
self.arg_dict = vars(self.args)
def setParser(self):
data_type = self.parser.add_mutually_exclusive_group(required=True)
# Required Options
self.parser.add_argument('-f', dest='data_file', required=True, type=str,
help="Name of Data File")
data_type.add_argument('-p', dest='plink', action='store_true',
help="PLINK format (.ped/.map)")
data_type.add_argument('-s', dest='snps', action='store_true',
help="SNP matrix (N x SNPs)")
data_type.add_argument('-m', dest='dist', action='store_true',
help="Distance matrix (N x N)")
self.parser.add_argument('-a', dest='attribute_file', default='', type=str, required=True,
help="Node attribute file (.csv)")
# MAIN Options
self.parser.add_argument('--quality', dest='qc', action='store_true', default=False,
help="Quality control in PLINK (OFF)")
self.parser.add_argument('--distance', dest='distance', default='asd', type=str,
help="Distance measure for SNPs: hamming, asd, correlation... (asd)")
self.parser.add_argument('--algorithm', dest='algorithm', default='auto', type=str,
                                 help="Algorithm for NN: auto, ball_tree, kd_tree, brute (auto)")
self.parser.add_argument('--mst-off', dest='tree', action='store_false', default=True,
help="Disable minimum spanning tree (OFF)")
self.parser.add_argument('--ploidy', dest='ploidy', default='diploid', type=str,
                                 help="Set ploidy: haploid, diploid (diploid)")
self.parser.add_argument('--missing', dest='missing', default='0', type=str,
help="Set missing character (0)")
self.parser.add_argument('--prefix', dest='prefix', default='project', type=str,
help="Set prefix (project)")
self.parser.add_argument('--project', dest='project', default=time.strftime("%d-%m-%Y_%H-%M-%S"), type=str,
help="Output project name (timestamp)")
self.parser.add_argument('--sep', dest='sep', default='\t', type=str,
help="Delimiter for data file (\\t).")
# PARAMETER Options
self.parser.add_argument('--mind', dest='mind', default=0.1, type=float,
help="Filter samples > missing rate (0.1)")
self.parser.add_argument('--geno', dest='geno', default=0.1, type=float,
help="Filter SNPs > missing rate (0.1)")
self.parser.add_argument('--maf', dest='maf', default=0.01, type=float,
help="Filter SNPs < minor allele frequency (0.01)")
self.parser.add_argument('--hwe', dest='hwe', default=0.001, type=float,
help="Filter SNPs failing HWE test at P < (0.001)")
self.parser.add_argument('--start', dest='start', default=10, type=int,
help="Start at k = (10)")
self.parser.add_argument('--stop', dest='stop', default=40, type=int,
help="Stop at k = (40)")
self.parser.add_argument('--step', dest='step', default=10, type=int,
help="Step by k = (10)")
# PIPELINE Options
self.parser.add_argument('--visual', dest='visual', action='store_true', default=False,
help="Node attributes ONLY (OFF)")
self.parser.add_argument('--off', dest='off', action='store_true', default=False,
help="Switch off NetView and run only QC (OFF).")
self.parser.add_argument('--matrix', dest='mat', action='store_true', default=False,
help="Generate distance matrix ONLY (OFF).")
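# Illustrative invocation assembled from the options above (file names are
# hypothetical):
#
#   python netview.py -f data.ped -p -a attributes.csv --quality \
#       --distance asd --start 10 --stop 40 --step 10
#
# This runs PLINK quality control on the .ped/.map input, derives the distance
# matrix and builds mutual k-nearest-neighbour networks for k = 10, 20, 30, 40.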
#### Data Module ####
class Data:
### DATA ATTRIBUTES ###
def __init__(self):
self.prefix = "project"
self.ploidy = 'diploid'
self.missing = "0"
self.n = 0
self.nSNP = 0
self.ids = [] # IDs
self.alleles = []
self.snps = np.arange(5) # Array (N x SNPs)
self.biodata = [] # List/Alignment of BioPython SeqRecords
self.meta_data = {}
self.snp_data = {}
self.matrices = {}
self.networks = {}
self.matrix = np.arange(5) # Current Matrix
self.netview_runs = 0
self.filetype = ''
### DATA READER ###
def readData(self, file, f, sep="\t", header=False, add_col=0):
def _read_nexus(file, sep=sep):
snp_position = []
snps = []
matrix = False
for line in file:
content = line.strip().split(sep)
if matrix == True:
if ";" in line:
break
snp_position.append(content[0])
snps.append(content[1:])
else:
if "dimensions" in line:
self.n = int(content[1].split("=")[1])
self.nSNP = int(content[2].split("=")[1][:-1])
elif "taxlabels" in line:
self.ids = content[1:]
elif "matrix" in line:
matrix = True
self.snps = np.array([list(i) for i in zip(*snps)]) # ordered by N
self.snp_data['snp_id'] = [''.join(p.split("_")[:-1]) for p in snp_position]
self.snp_data['snp_position'] = [p.split("_")[-1] for p in snp_position]
self.filetype = 'nexus'
def _read_raxml(file, sep=sep):
header = []
ids = []
snps = []
for line in file:
content = line.strip().split(sep)
if header:
ids.append(content[0])
snps.append(content[1])
else:
header = content
self.n = header[0]
self.nSNP = header[1]
self.ids = ids
self.snps = np.array(snps)
self.filetype = 'raxml'
def _read_plink(file, filename, sep=sep):
map_name = filename.split(".")[0] + ".map"
map_file = open(map_name)
ids = []
meta = []
snps = []
for line in file:
content = line.strip().split(sep)
ids.append(content[1])
snps.append(content[6:])
meta.append(content[:6])
self.ids = ids
self.snps = np.array(snps)
self.nSNP = len(self.snps[0])
self.n = len(self.ids)
self.meta_data["pop"] = [i[0] for i in meta]
self.meta_data["dam"] = [i[2] for i in meta]
self.meta_data["sire"] = [i[3] for i in meta]
self.meta_data["sex"] = [i[4] for i in meta]
self.meta_data["phenotype"] = [i[5] for i in meta]
map_content = [line.strip().split() for line in map_file]
map_content = list(zip(*map_content))
self.snp_data['snp_chromosome'] = list(map_content[0])
self.snp_data['snp_id'] = list(map_content[1])
self.snp_data['snp_genetic_distance'] = list(map_content[2])
self.snp_data['snp_position'] = list(map_content[3])
map_file.close()
self.filetype = 'plink'
def _read_matrix(file, header=header, add_col=add_col, sep=sep):
content = [line.strip().split(sep)[add_col:] for line in file]
if header:
content = content[1:]
matrix = np.array([list(map(float, ind)) for ind in content])
self.matrix = matrix
self.matrices['input'] = matrix
return matrix
def _read_snp_mat(file, sep):
matrix = np.array([line.strip().split(sep) for line in file])
self.snps = matrix
self.n = len(matrix[:, 1])
self.nSNP = len(matrix[1, :])
if self.ploidy == 'diploid':
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP//2)]
else:
self.snp_data['snp_id'] = [str(i) for i in range(self.nSNP)]
def _read_attributes(file, sep=sep):
content = [line.strip().split(sep) for line in file]
head = content[0]
content = list(zip(*content[1:]))
for i in range(len(head)):
self.meta_data[head[i]] = content[i]
self.ids = list(content[0])
## Main Read ##
infile = open(file)
f = f.lower()
if f == "nexus":
_read_nexus(infile, sep)
elif f =="raxml":
_read_raxml(infile, sep)
elif f == "plink":
_read_plink(infile, file, sep)
elif f == "matrix":
matrix = _read_matrix(infile, header, add_col, sep)
elif f == 'snp_mat':
_read_snp_mat(infile, sep)
elif f == 'attributes':
_read_attributes(infile, sep)
else:
print("File format not supported.")
raise IOError
infile.close()
if f != 'attributes':
alleles = np.unique(self.snps).tolist()
if self.missing in alleles:
alleles.remove(self.missing)
self.alleles = alleles
if f == 'matrix':
return matrix
### DATA WRITER ###
def writeData(self, f, file='data.out', sep="\t"):
def _write_raxml(outfile, sep):
outfile.write(str(self.n) + sep + str(self.nSNP) + "\n")
for i in range(self.n):
outfile.write(self.ids[i] + sep + ''.join(self.snps[i]) + "\n")
def _write_nexus(outfile, sep):
taxlabels = " ".join(self.ids)
header = '#nexus\nbegin data;\ndimensions ntax=' + str(self.n) + ' nchar=' + str(self.nSNP) + \
';\nformat symbols="AGCT" gap=. datatype=nucleotide;\ntaxlabels ' + taxlabels + ';\nmatrix\n'
tail = ";\nend;"
snps = list(zip(*self.snps))
outfile.write(header)
for i in range(self.nSNP):
if 'snp_chromosome' in self.snp_data.keys():
outfile.write(self.snp_data['snp_chromosome'][i] + "_")
else:
outfile.write(sep)
if 'snp_id' in self.snp_data.keys():
outfile.write(self.snp_data['snp_id'][i] + sep)
else:
outfile.write("SNP" + str(i) + sep)
outfile.write(sep.join(snps[i]) + "\n")
outfile.write(tail)
def _write_plink(outfile, filename, sep):
mapname = filename.split('.')[0] + ".map"
for i in range(self.n):
if 'pop' in self.meta_data.keys():
outfile.write(self.meta_data['pop'][i] + sep)
else:
outfile.write("NA" + sep)
if self.ids:
outfile.write(self.ids[i] + sep)
else:
outfile.write("N" + str(i+1) + sep)
if 'dam' in self.meta_data.keys():
outfile.write(self.meta_data['dam'][i] + sep)
else:
outfile.write("0" + sep)
if 'sire' in self.meta_data.keys():
outfile.write(self.meta_data['sire'][i] + sep)
else:
outfile.write("0" + sep)
if 'sex' in self.meta_data.keys():
outfile.write(self.meta_data['sex'][i] + sep)
else:
outfile.write("0" + sep)
if 'phenotype' in self.meta_data.keys():
outfile.write(self.meta_data['phenotype'][i] + sep)
else:
outfile.write("0" + sep)
outfile.write(sep.join(self.snps[i]) + "\n")
map_file = open(mapname, "w")
if 'snp_id' in self.snp_data:
for i in range(len(self.snp_data['snp_id'])):
if 'snp_chromosome' in self.snp_data.keys():
map_file.write(self.snp_data['snp_chromosome'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_id' in self.snp_data.keys():
map_file.write(self.snp_data['snp_id'][i] + sep)
else:
map_file.write("SNP" + str(i+1) + sep)
if 'snp_genetic_distance' in self.snp_data.keys():
map_file.write(self.snp_data['snp_genetic_distance'][i] + sep)
else:
map_file.write("0" + sep)
if 'snp_position' in self.snp_data.keys():
map_file.write(self.snp_data['snp_position'][i] + sep + "\n")
else:
map_file.write("0" + sep + "\n")
map_file.close()
def _write_metadata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
ordered_keys = sorted([key for key in self.meta_data.keys()])
outfile.write("Isolate")
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.n):
if self.ids:
outfile.write(self.ids[i])
else:
                    outfile.write("N" + str(i + 1))
for key in ordered_keys:
outfile.write(sep + self.meta_data[key][i])
outfile.write("\n")
def _write_snpdata(outfile, sep):
outfile.write("#" + sep + "n=" + str(self.n) + sep + "nSNP=" +
str(self.nSNP) + sep + "(" + self.ploidy + ")\n")
snp_data = dict(self.snp_data)
ordered_keys = sorted([key for key in snp_data.keys()])
outfile.write("SNP" + sep)
for key in ordered_keys:
outfile.write(sep + key)
outfile.write("\n")
for i in range(self.nSNP):
outfile.write("SNP_" + str(i))
for key in ordered_keys:
outfile.write(sep + snp_data[key][i])
outfile.write("\n")
def _write_attributes():
for key, value in self.meta_data.items():
outname = self.prefix + '_' + key + '.nat'
out = open(outname, 'w')
out.write('ID\t' + self.prefix + '_' + key + '\n')
for i in range(len(value)):
out.write(self.ids[i] + '\t' + value[i] + '\n')
out.close()
## Main Write ##
if f == 'attributes':
_write_attributes()
else:
filename = file
outfile = open(filename, "w")
f = f.lower()
if f == "nexus":
_write_nexus(outfile, sep)
elif f =="raxml":
_write_raxml(outfile, sep)
elif f == "plink":
_write_plink(outfile, file, sep)
elif f == "matrix":
np.savetxt(filename, self.matrix, fmt='%.9f', delimiter=sep)
elif f == "meta":
_write_metadata(outfile, sep)
elif f == "snp":
_write_snpdata(outfile, sep)
else:
raise IOError("File format not supported.")
outfile.close()
def __str__(self):
return ('-----------\nNumber of Individuals: %i\nNumber of SNPs: %i\nPloidy: %s\n-----------\n') % \
(self.n, self.nSNP, self.ploidy)
#### Analysis Module ####
class Analysis:
def __init__(self, data):
self.data = data
def getDistance(self, target='snps', distance='hamming'):
print(get_time() + "\t" + 'Distance = ' + distance.upper())
if self.data.filetype == 'dist':
target = 'matrix'
if target == 'matrix':
matrix = np.array(self.data.matrix)
else:
# Convert alleles to numbers (e.g. A -> 1, B -> 2) for use in scipy.spatial.distance.pdist()
allele_codes = {}
for i in range(len(self.data.alleles)):
allele_codes[self.data.alleles[i]] = int(i+1)
allele_codes[self.data.missing] = 0 # missing can not be 1 to i
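            # e.g. alleles ['A', 'C', 'G', 'T'] with missing '0' yield the
            # mapping {'A': 1, 'C': 2, 'G': 3, 'T': 4, '0': 0}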
snps = self.data.snps
for a, code in allele_codes.items():
snps[snps == a] = code
matrix = snps
if distance == 'asd':
self.runPLINK(asd=True)
self.data.readData(file=self.data.prefix + '_plink.mdist', f='matrix', sep=' ')
else:
matrix = sd.squareform(sd.pdist(matrix, distance))
self.data.matrix = matrix
self.data.matrices[distance] = self.data.matrix
return matrix
def runPLINK(self, qc_parameters={}, commandstring='', asd=False, quality=False):
if self.data.ploidy == 'haploid':
raise AttributeError('Haploid genotypes not supported for PLINK.')
if commandstring:
subprocess.call(commandstring)
else:
self.data.writeData(file=self.data.prefix + '_plink_in.ped', f='plink')
if quality and qc_parameters:
command = ['plink', '--noweb', '--file', self.data.prefix + '_plink_in']
for key, value in qc_parameters.items():
command.append(key)
command.append(str(value))
command.append('--recode')
command.append('--out')
command.append(self.data.prefix + '_plink_qc')
subprocess.call(command, stdout=subprocess.DEVNULL)
if os.path.exists(self.data.prefix + '_plink_qc.ped'):
self.data.readData(file=self.data.prefix + '_plink_qc.ped', f='plink', sep=' ')
if asd:
subprocess.call(['plink', '--noweb', '--file', self.data.prefix + '_plink_in', '--cluster', '--distance-matrix',
'--out', self.data.prefix + '_plink'], stdout=subprocess.DEVNULL)
def updateNodeAttributes(self, attribute_file):
if os.path.isfile(self.data.prefix + '_plink_qc.irem'):
infile = open(self.data.prefix + '_plink_qc.irem')
to_remove = [line.strip().split()[1] for line in infile]
infile.close()
infile = open(attribute_file)
outname = attribute_file.split('.')[0] + '_qc.csv'
outfile = open(outname, 'w')
for line in infile:
content = line.strip().split(',')
if content[0] not in to_remove:
outfile.write(line)
infile.close()
outfile.close()
self.data.readData(file=outname, f='attributes', sep=',')
def runNetView(self, tree=True, start=10, stop=40, step=10, algorithm='auto'):
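        # Optionally builds a minimum spanning tree from the current distance
        # matrix, then constructs the mutual k-nearest-neighbour graphs in
        # parallel (one worker per k in [start, stop] with the given step) and
        # writes a tab-separated .edges file per k, flagging MST edges in red.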
print(get_time() + "\t" + "Minimum Spanning Tree = " + str(tree).upper())
print(get_time() + "\t" + "Nearest Neighbour = " + algorithm.upper())
print(get_time() + "\t" + "k = " + str(start) + " - " + str(stop) + ' (by ' + str(step) + ')')
print(get_time() + "\t" + "---------------------------------")
self.data.netview_runs += 1
matrix = self.data.matrix
if tree:
mst = csg.minimum_spanning_tree(matrix)
mst = mst.toarray()
self.data.networks['mst_' + str(self.data.netview_runs)] = mst
mst = mst + mst.T
else:
mst = None
pool = mp.Pool()
networks = [pool.apply_async(netview, args=(matrix, k, mst, algorithm, tree,), callback=netview_callback)
for k in range(start, stop+1, step)]
pool.close()
pool.join()
for item in networks:
result = item.get()
edges_array = result[1]
edges = result[1].tolist()
mst_edges = result[4].tolist()
self.data.networks['netview_k' + str(result[0]) + '_' + str(self.data.netview_runs)] = result[1:]
filename = self.data.prefix + '_netview_k' + str(result[0]) +\
"_" + str(self.data.netview_runs) + '.edges'
out = open(filename, "w")
out.write('Source\tTarget\tDistance\tMST\n')
for i in range(len(edges)):
out.write(str(self.data.ids[edges[i][0]]) + "\t" + str(self.data.ids[edges[i][1]]) +
"\t" + str(matrix[edges[i][0], edges[i][1]]))
if tree:
if edges[i] in mst_edges:
out.write('\t' + 'red\n')
else:
out.write('\t' + 'grey\n')
else:
out.write("\n")
if not tree:
singletons = np.setdiff1d(np.arange(self.data.n), edges_array.flatten()).tolist()
if singletons:
for node in singletons:
out.write(str(node) + '\n')
out.close()
main()
| gpl-2.0 |
hsaputra/tensorflow | tensorflow/contrib/learn/python/learn/learn_io/data_feeder.py | 8 | 31357 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implementations of different data feeders to provide data for TF trainer."""
# TODO(ipolosukhin): Replace this module with feed-dict queue runners & queues.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import math
import numpy as np
import six
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# pylint: disable=g-multiple-import,g-bad-import-order
from .pandas_io import HAS_PANDAS, extract_pandas_data, extract_pandas_matrix, extract_pandas_labels
from .dask_io import HAS_DASK, extract_dask_data, extract_dask_labels
# pylint: enable=g-multiple-import,g-bad-import-order
def _get_in_out_shape(x_shape, y_shape, n_classes, batch_size=None):
"""Returns shape for input and output of the data feeder."""
x_is_dict, y_is_dict = isinstance(
x_shape, dict), y_shape is not None and isinstance(y_shape, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
if batch_size is None:
batch_size = list(x_shape.values())[0][0] if x_is_dict else x_shape[0]
elif batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
if x_is_dict:
input_shape = {}
for k, v in list(x_shape.items()):
input_shape[k] = [batch_size] + (list(v[1:]) if len(v) > 1 else [1])
else:
x_shape = list(x_shape[1:]) if len(x_shape) > 1 else [1]
input_shape = [batch_size] + x_shape
if y_shape is None:
return input_shape, None, batch_size
def out_el_shape(out_shape, num_classes):
out_shape = list(out_shape[1:]) if len(out_shape) > 1 else []
# Skip first dimension if it is 1.
if out_shape and out_shape[0] == 1:
out_shape = out_shape[1:]
if num_classes is not None and num_classes > 1:
return [batch_size] + out_shape + [num_classes]
else:
return [batch_size] + out_shape
if not y_is_dict:
output_shape = out_el_shape(y_shape, n_classes)
else:
output_shape = dict([
(k, out_el_shape(v, n_classes[k]
if n_classes is not None and k in n_classes else None))
for k, v in list(y_shape.items())
])
return input_shape, output_shape, batch_size
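# For example (hypothetical shapes), x_shape=(1000, 28, 28), y_shape=(1000,),
# n_classes=10 and batch_size=32 yield input_shape=[32, 28, 28] and
# output_shape=[32, 10].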
def _data_type_filter(x, y):
"""Filter data types into acceptable format."""
if HAS_DASK:
x = extract_dask_data(x)
if y is not None:
y = extract_dask_labels(y)
if HAS_PANDAS:
x = extract_pandas_data(x)
if y is not None:
y = extract_pandas_labels(y)
return x, y
def _is_iterable(x):
return hasattr(x, 'next') or hasattr(x, '__next__')
def setup_train_data_feeder(x,
y,
n_classes,
batch_size=None,
shuffle=True,
epochs=None):
"""Create data feeder, to sample inputs from dataset.
If `x` and `y` are iterators, use `StreamingDataFeeder`.
Args:
x: numpy, pandas or Dask matrix or dictionary of aforementioned. Also
supports iterables.
    y: numpy, pandas or Dask array or dictionary of aforementioned. Also
      supports iterables.
    n_classes: number of classes. Must be `None` or the same type as `y`. If
      `y` is a `dict` (or an iterable that returns dicts), `n_classes` must be
      a `dict` such that `n_classes[key]` is the number of classes for
      `y[key]`.
batch_size: size to split data into parts. Must be >= 1.
shuffle: Whether to shuffle the inputs.
epochs: Number of epochs to run.
Returns:
DataFeeder object that returns training data.
Raises:
ValueError: if one of `x` and `y` is iterable and the other is not.
"""
x, y = _data_type_filter(x, y)
if HAS_DASK:
# pylint: disable=g-import-not-at-top
import dask.dataframe as dd
if (isinstance(x, (dd.Series, dd.DataFrame)) and
(y is None or isinstance(y, (dd.Series, dd.DataFrame)))):
data_feeder_cls = DaskDataFeeder
else:
data_feeder_cls = DataFeeder
else:
data_feeder_cls = DataFeeder
if _is_iterable(x):
if y is not None and not _is_iterable(y):
raise ValueError('Both x and y should be iterators for '
'streaming learning to work.')
return StreamingDataFeeder(x, y, n_classes, batch_size)
return data_feeder_cls(
x, y, n_classes, batch_size, shuffle=shuffle, epochs=epochs)
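# A minimal usage sketch for the array-backed DataFeeder case (variable names
# are illustrative):
#
#   feeder = setup_train_data_feeder(x_train, y_train, n_classes=10,
#                                    batch_size=32, shuffle=True, epochs=5)
#   input_ph, output_ph = feeder.input_builder()
#   feed_fn = feeder.get_feed_dict_fn()
#   # each call to feed_fn() returns a feed_dict for the next mini-batch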
def _batch_data(x, batch_size=None):
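  # Accumulates samples from the iterator `x` into chunks of `batch_size` and
  # yields each chunk as an np.matrix (or a dict of np.matrix objects when the
  # samples are dicts); a final, possibly smaller, chunk is yielded at the end.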
if (batch_size is not None) and (batch_size <= 0):
raise ValueError('Invalid batch_size %d.' % batch_size)
x_first_el = six.next(x)
x = itertools.chain([x_first_el], x)
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
for data in x:
if isinstance(data, dict):
for k, v in list(data.items()):
chunk[k].append(v)
if (batch_size is not None) and (len(chunk[k]) >= batch_size):
chunk[k] = np.matrix(chunk[k])
chunk_filled = True
if chunk_filled:
yield chunk
chunk = dict([(k, []) for k in list(x_first_el.keys())]) if isinstance(
x_first_el, dict) else []
chunk_filled = False
else:
chunk.append(data)
if (batch_size is not None) and (len(chunk) >= batch_size):
yield np.matrix(chunk)
chunk = []
if isinstance(x_first_el, dict):
for k, v in list(data.items()):
chunk[k] = np.matrix(chunk[k])
yield chunk
else:
yield np.matrix(chunk)
def setup_predict_data_feeder(x, batch_size=None):
"""Returns an iterable for feeding into predict step.
Args:
x: numpy, pandas, Dask array or dictionary of aforementioned. Also supports
iterable.
batch_size: Size of batches to split data into. If `None`, returns one
batch of full size.
Returns:
List or iterator (or dictionary thereof) of parts of data to predict on.
Raises:
ValueError: if `batch_size` <= 0.
"""
if HAS_DASK:
x = extract_dask_data(x)
if HAS_PANDAS:
x = extract_pandas_data(x)
if _is_iterable(x):
return _batch_data(x, batch_size)
if len(x.shape) == 1:
x = np.reshape(x, (-1, 1))
if batch_size is not None:
if batch_size <= 0:
raise ValueError('Invalid batch_size %d.' % batch_size)
n_batches = int(math.ceil(float(len(x)) / batch_size))
return [x[i * batch_size:(i + 1) * batch_size] for i in xrange(n_batches)]
return [x]
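# For example (hypothetical input), a (1000, 5) array with batch_size=300 is
# split into four parts of 300, 300, 300 and 100 rows.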
def setup_processor_data_feeder(x):
"""Sets up processor iterable.
Args:
x: numpy, pandas or iterable.
Returns:
Iterable of data to process.
"""
if HAS_PANDAS:
x = extract_pandas_matrix(x)
return x
def check_array(array, dtype):
"""Checks array on dtype and converts it if different.
Args:
array: Input array.
dtype: Expected dtype.
Returns:
Original array or converted.
"""
# skip check if array is instance of other classes, e.g. h5py.Dataset
# to avoid copying array and loading whole data into memory
if isinstance(array, (np.ndarray, list)):
array = np.array(array, dtype=dtype, order=None, copy=False)
return array
def _access(data, iloc):
"""Accesses an element from collection, using integer location based indexing.
Args:
data: array-like. The collection to access
iloc: `int` or `list` of `int`s. Location(s) to access in `collection`
Returns:
The element of `a` found at location(s) `iloc`.
"""
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if isinstance(data, pd.Series) or isinstance(data, pd.DataFrame):
return data.iloc[iloc]
return data[iloc]
def _check_dtype(dtype):
if dtypes.as_dtype(dtype) == dtypes.float64:
logging.warn(
'float64 is not supported by many models, consider casting to float32.')
return dtype
class DataFeeder(object):
"""Data feeder is an example class to sample data for TF trainer."""
def __init__(self,
x,
y,
n_classes,
batch_size=None,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DataFeeder instance.
Args:
x: One feature sample which can either Nd numpy matrix of shape
`[n_samples, n_features, ...]` or dictionary of Nd numpy matrix.
y: label vector, either floats for regression or class id for
classification. If matrix, will consider as a sequence of labels.
Can be `None` for unsupervised setting. Also supports dictionary of
labels.
n_classes: Number of classes, 0 and 1 are considered regression, `None`
will pass through the input labels without one-hot conversion. Also, if
`y` is `dict`, then `n_classes` must be `dict` such that
`n_classes[key] = n_classes for label y[key]`, `None` otherwise.
batch_size: Mini-batch size to accumulate samples in one mini batch.
shuffle: Whether to shuffle `x`.
random_state: Numpy `RandomState` object to reproduce sampling.
epochs: Number of times to iterate over input data before raising
`StopIteration` exception.
Attributes:
x: Input features (ndarray or dictionary of ndarrays).
y: Input label (ndarray or dictionary of ndarrays).
n_classes: Number of classes (if `None`, pass through indices without
one-hot conversion).
batch_size: Mini-batch size to accumulate.
input_shape: Shape of the input (or dictionary of shapes).
output_shape: Shape of the output (or dictionary of shapes).
      input_dtype: DType of input (or dictionary of dtypes).
      output_dtype: DType of output (or dictionary of dtypes).
"""
x_is_dict, y_is_dict = isinstance(x, dict), y is not None and isinstance(
y, dict)
if isinstance(y, list):
y = np.array(y)
self._x = dict([(k, check_array(v, v.dtype)) for k, v in list(x.items())
]) if x_is_dict else check_array(x, x.dtype)
self._y = None if y is None else (
dict([(k, check_array(v, v.dtype)) for k, v in list(y.items())])
if y_is_dict else check_array(y, y.dtype))
# self.n_classes is not None means we're converting raw target indices
# to one-hot.
if n_classes is not None:
if not y_is_dict:
y_dtype = (np.int64
if n_classes is not None and n_classes > 1 else np.float32)
self._y = (None if y is None else check_array(y, dtype=y_dtype))
self.n_classes = n_classes
self.max_epochs = epochs
x_shape = dict([(k, v.shape) for k, v in list(self._x.items())
]) if x_is_dict else self._x.shape
y_shape = dict([(k, v.shape) for k, v in list(self._y.items())
]) if y_is_dict else None if y is None else self._y.shape
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
# Input dtype matches dtype of x.
self._input_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._x.items())])
if x_is_dict else _check_dtype(self._x.dtype))
# self._output_dtype == np.float32 when y is None
self._output_dtype = (
dict([(k, _check_dtype(v.dtype)) for k, v in list(self._y.items())])
if y_is_dict else (
_check_dtype(self._y.dtype) if y is not None else np.float32))
# self.n_classes is None means we're passing in raw target indices
if n_classes is not None and y_is_dict:
for key in list(n_classes.keys()):
if key in self._output_dtype:
self._output_dtype[key] = np.float32
self._shuffle = shuffle
self.random_state = np.random.RandomState(
42) if random_state is None else random_state
if x_is_dict:
num_samples = list(self._x.values())[0].shape[0]
elif tensor_util.is_tensor(self._x):
num_samples = self._x.shape[
0].value # shape will be a Dimension, extract an int
else:
num_samples = self._x.shape[0]
if self._shuffle:
self.indices = self.random_state.permutation(num_samples)
else:
self.indices = np.array(range(num_samples))
self.offset = 0
self.epoch = 0
self._epoch_placeholder = None
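  # A minimal usage sketch (names are illustrative):
  #
  #   feeder = DataFeeder(x, y, n_classes=3, batch_size=64)
  #   input_ph, output_ph = feeder.input_builder()
  #   feed_fn = feeder.get_feed_dict_fn()
  #   session.run(train_op, feed_dict=feed_fn())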
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def shuffle(self):
return self._shuffle
@property
def input_dtype(self):
return self._input_dtype
@property
def output_dtype(self):
return self._output_dtype
@property
def batch_size(self):
return self._batch_size
def make_epoch_variable(self):
"""Adds a placeholder variable for the epoch to the graph.
Returns:
The epoch placeholder.
"""
self._epoch_placeholder = array_ops.placeholder(
dtypes.int32, [1], name='epoch')
return self._epoch_placeholder
def input_builder(self):
"""Builds inputs in the graph.
Returns:
Two placeholders for inputs and outputs.
"""
def get_placeholder(shape, dtype, name_prepend):
if shape is None:
return None
if isinstance(shape, dict):
placeholder = {}
for key in list(shape.keys()):
placeholder[key] = array_ops.placeholder(
dtypes.as_dtype(dtype[key]), [None] + shape[key][1:],
name=name_prepend + '_' + key)
else:
placeholder = array_ops.placeholder(
dtypes.as_dtype(dtype), [None] + shape[1:], name=name_prepend)
return placeholder
self._input_placeholder = get_placeholder(self.input_shape,
self._input_dtype, 'input')
self._output_placeholder = get_placeholder(self.output_shape,
self._output_dtype, 'output')
return self._input_placeholder, self._output_placeholder
def set_placeholders(self, input_placeholder, output_placeholder):
"""Sets placeholders for this data feeder.
Args:
input_placeholder: Placeholder for `x` variable. Should match shape
of the examples in the x dataset.
output_placeholder: Placeholder for `y` variable. Should match
shape of the examples in the y dataset. Can be `None`.
"""
self._input_placeholder = input_placeholder
self._output_placeholder = output_placeholder
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {
'epoch': self.epoch,
'offset': self.offset,
'batch_size': self._batch_size
}
def get_feed_dict_fn(self):
"""Returns a function that samples data into given placeholders.
Returns:
A function that when called samples a random subset of batch size
from `x` and `y`.
"""
x_is_dict, y_is_dict = isinstance(
self._x, dict), self._y is not None and isinstance(self._y, dict)
# Assign input features from random indices.
def extract(data, indices):
return (np.array(_access(data, indices)).reshape((indices.shape[0], 1)) if
len(data.shape) == 1 else _access(data, indices))
# assign labels from random indices
def assign_label(data, shape, dtype, n_classes, indices):
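      # Builds an array of labels for the sampled indices: when n_classes > 1
      # the raw class indices are one-hot encoded, otherwise the raw values
      # are copied through unchanged.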
shape[0] = indices.shape[0]
out = np.zeros(shape, dtype=dtype)
for i in xrange(out.shape[0]):
sample = indices[i]
# self.n_classes is None means we're passing in raw target indices
if n_classes is None:
out[i] = _access(data, sample)
else:
if n_classes > 1:
if len(shape) == 2:
out.itemset((i, int(_access(data, sample))), 1.0)
else:
for idx, value in enumerate(_access(data, sample)):
out.itemset(tuple([i, idx, value]), 1.0)
else:
out[i] = _access(data, sample)
return out
def _feed_dict_fn():
"""Function that samples data into given placeholders."""
if self.max_epochs is not None and self.epoch + 1 > self.max_epochs:
raise StopIteration
assert self._input_placeholder is not None
feed_dict = {}
if self._epoch_placeholder is not None:
feed_dict[self._epoch_placeholder.name] = [self.epoch]
# Take next batch of indices.
x_len = list(self._x.values())[0].shape[
0] if x_is_dict else self._x.shape[0]
end = min(x_len, self.offset + self._batch_size)
batch_indices = self.indices[self.offset:end]
# adding input placeholder
feed_dict.update(
dict([(self._input_placeholder[k].name, extract(v, batch_indices))
for k, v in list(self._x.items())]) if x_is_dict else
{self._input_placeholder.name: extract(self._x, batch_indices)})
# move offset and reset it if necessary
self.offset += self._batch_size
if self.offset >= x_len:
self.indices = self.random_state.permutation(
x_len) if self._shuffle else np.array(range(x_len))
self.offset = 0
self.epoch += 1
# return early if there are no labels
if self._output_placeholder is None:
return feed_dict
# adding output placeholders
if y_is_dict:
for k, v in list(self._y.items()):
n_classes = (self.n_classes[k] if k in self.n_classes else
None) if self.n_classes is not None else None
shape, dtype = self.output_shape[k], self._output_dtype[k]
feed_dict.update({
self._output_placeholder[k].name:
assign_label(v, shape, dtype, n_classes, batch_indices)
})
else:
shape, dtype, n_classes = self.output_shape, self._output_dtype, self.n_classes
feed_dict.update({
self._output_placeholder.name:
assign_label(self._y, shape, dtype, n_classes, batch_indices)
})
return feed_dict
return _feed_dict_fn
class StreamingDataFeeder(DataFeeder):
"""Data feeder for TF trainer that reads data from iterator.
  Streaming data feeder allows data to be read as it comes in from disk or
  somewhere else. It is common to have these iterators rotate infinitely over
  the dataset, so that how much to learn can be controlled on the trainer side.
"""
def __init__(self, x, y, n_classes, batch_size):
"""Initializes a StreamingDataFeeder instance.
Args:
x: iterator each element of which returns one feature sample. Sample can
be a Nd numpy matrix or dictionary of Nd numpy matrices.
y: iterator each element of which returns one label sample. Sample can be
a Nd numpy matrix or dictionary of Nd numpy matrices with 1 or many
classes regression values.
n_classes: indicator of how many classes the corresponding label sample
has for the purposes of one-hot conversion of label. In case where `y`
is a dictionary, `n_classes` must be dictionary (with same keys as `y`)
of how many classes there are in each label in `y`. If key is
present in `y` and missing in `n_classes`, the value is assumed `None`
and no one-hot conversion will be applied to the label with that key.
      batch_size: Mini batch size to accumulate samples in one batch. If set
        to `None`, the iterator is assumed to already return batched elements.
Attributes:
x: input features (or dictionary of input features).
y: input label (or dictionary of output features).
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input (can be dictionary depending on `x`).
output_shape: shape of the output (can be dictionary depending on `y`).
input_dtype: dtype of input (can be dictionary depending on `x`).
output_dtype: dtype of output (can be dictionary depending on `y`).
"""
# pylint: disable=invalid-name,super-init-not-called
x_first_el = six.next(x)
self._x = itertools.chain([x_first_el], x)
if y is not None:
y_first_el = six.next(y)
self._y = itertools.chain([y_first_el], y)
else:
y_first_el = None
self._y = None
self.n_classes = n_classes
x_is_dict = isinstance(x_first_el, dict)
y_is_dict = y is not None and isinstance(y_first_el, dict)
if y_is_dict and n_classes is not None:
assert isinstance(n_classes, dict)
# extract shapes for first_elements
if x_is_dict:
x_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(x_first_el.items())])
else:
x_first_el_shape = [1] + list(x_first_el.shape)
if y_is_dict:
y_first_el_shape = dict(
[(k, [1] + list(v.shape)) for k, v in list(y_first_el.items())])
elif y is None:
y_first_el_shape = None
else:
y_first_el_shape = ([1] + list(y_first_el[0].shape if isinstance(
y_first_el, list) else y_first_el.shape))
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_first_el_shape, y_first_el_shape, n_classes, batch_size)
# Input dtype of x_first_el.
if x_is_dict:
self._input_dtype = dict(
[(k, _check_dtype(v.dtype)) for k, v in list(x_first_el.items())])
else:
self._input_dtype = _check_dtype(x_first_el.dtype)
# Output dtype of y_first_el.
def check_y_dtype(el):
if isinstance(el, np.ndarray):
return el.dtype
elif isinstance(el, list):
return check_y_dtype(el[0])
else:
return _check_dtype(np.dtype(type(el)))
# Output types are floats, due to both softmaxes and regression req.
if n_classes is not None and (y is None or not y_is_dict) and n_classes > 0:
self._output_dtype = np.float32
elif y_is_dict:
self._output_dtype = dict(
[(k, check_y_dtype(v)) for k, v in list(y_first_el.items())])
elif y is None:
self._output_dtype = None
else:
self._output_dtype = check_y_dtype(y_first_el)
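  # Illustrative construction from generators (the generator functions below
  # are hypothetical):
  #
  #   def x_gen():
  #     while True:
  #       yield np.random.rand(28, 28)
  #
  #   def y_gen():
  #     while True:
  #       yield np.random.randint(0, 10)
  #
  #   feeder = StreamingDataFeeder(x_gen(), y_gen(), n_classes=10,
  #                                batch_size=32)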
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self):
"""Returns a function, that will sample data and provide it to placeholders.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
self.stopped = False
def _feed_dict_fn():
"""Samples data and provides it to placeholders.
Returns:
`dict` of input and output tensors.
"""
def init_array(shape, dtype):
"""Initialize array of given shape or dict of shapes and dtype."""
if shape is None:
return None
elif isinstance(shape, dict):
return dict([(k, np.zeros(shape[k], dtype[k]))
for k in list(shape.keys())])
else:
return np.zeros(shape, dtype=dtype)
def put_data_array(dest, index, source=None, n_classes=None):
"""Puts data array into container."""
if source is None:
dest = dest[:index]
elif n_classes is not None and n_classes > 1:
if len(self.output_shape) == 2:
dest.itemset((index, source), 1.0)
else:
for idx, value in enumerate(source):
dest.itemset(tuple([index, idx, value]), 1.0)
else:
if len(dest.shape) > 1:
dest[index, :] = source
else:
dest[index] = source[0] if isinstance(source, list) else source
return dest
def put_data_array_or_dict(holder, index, data=None, n_classes=None):
"""Puts data array or data dictionary into container."""
if holder is None:
return None
if isinstance(holder, dict):
if data is None:
data = {k: None for k in holder.keys()}
assert isinstance(data, dict)
for k in holder.keys():
num_classes = n_classes[k] if (n_classes is not None and
k in n_classes) else None
holder[k] = put_data_array(holder[k], index, data[k], num_classes)
else:
holder = put_data_array(holder, index, data, n_classes)
return holder
if self.stopped:
raise StopIteration
inp = init_array(self.input_shape, self._input_dtype)
out = init_array(self.output_shape, self._output_dtype)
for i in xrange(self._batch_size):
# Add handling when queue ends.
try:
next_inp = six.next(self._x)
inp = put_data_array_or_dict(inp, i, next_inp, None)
except StopIteration:
self.stopped = True
if i == 0:
raise
inp = put_data_array_or_dict(inp, i, None, None)
out = put_data_array_or_dict(out, i, None, None)
break
if self._y is not None:
next_out = six.next(self._y)
out = put_data_array_or_dict(out, i, next_out, self.n_classes)
# creating feed_dict
if isinstance(inp, dict):
feed_dict = dict([(self._input_placeholder[k].name, inp[k])
for k in list(self._input_placeholder.keys())])
else:
feed_dict = {self._input_placeholder.name: inp}
if self._y is not None:
if isinstance(out, dict):
feed_dict.update(
dict([(self._output_placeholder[k].name, out[k])
for k in list(self._output_placeholder.keys())]))
else:
feed_dict.update({self._output_placeholder.name: out})
return feed_dict
return _feed_dict_fn
class DaskDataFeeder(object):
"""Data feeder for that reads data from dask.Series and dask.DataFrame.
Numpy arrays can be serialized to disk and it's possible to do random seeks
into them. DaskDataFeeder will remove requirement to have full dataset in the
memory and still do random seeks for sampling of batches.
"""
def __init__(self,
x,
y,
n_classes,
batch_size,
shuffle=True,
random_state=None,
epochs=None):
"""Initializes a DaskDataFeeder instance.
Args:
      x: iterator that, for each element, returns features.
      y: iterator that, for each element, returns 1 or many classes /
        regression values.
n_classes: indicator of how many classes the label has.
batch_size: Mini batch size to accumulate.
shuffle: Whether to shuffle the inputs.
      random_state: random state for RNG. Note that it will mutate, so use an
        int value for this if you want consistent sized batches.
epochs: Number of epochs to run.
Attributes:
x: input features.
y: input label.
n_classes: number of classes.
batch_size: mini batch size to accumulate.
input_shape: shape of the input.
output_shape: shape of the output.
input_dtype: dtype of input.
output_dtype: dtype of output.
Raises:
ValueError: if `x` or `y` are `dict`, as they are not supported currently.
"""
if isinstance(x, dict) or isinstance(y, dict):
raise ValueError(
'DaskDataFeeder does not support dictionaries at the moment.')
# pylint: disable=invalid-name,super-init-not-called
import dask.dataframe as dd # pylint: disable=g-import-not-at-top
# TODO(terrytangyuan): check x and y dtypes in dask_io like pandas
self._x = x
self._y = y
# save column names
self._x_columns = list(x.columns)
if isinstance(y.columns[0], str):
self._y_columns = list(y.columns)
else:
# deal with cases where two DFs have overlapped default numeric colnames
self._y_columns = len(self._x_columns) + 1
self._y = self._y.rename(columns={y.columns[0]: self._y_columns})
# TODO(terrytangyuan): deal with unsupervised cases
# combine into a data frame
self.df = dd.multi.concat([self._x, self._y], axis=1)
self.n_classes = n_classes
x_count = x.count().compute()[0]
x_shape = (x_count, len(self._x.columns))
y_shape = (x_count, len(self._y.columns))
# TODO(terrytangyuan): Add support for shuffle and epochs.
self._shuffle = shuffle
self.epochs = epochs
self.input_shape, self.output_shape, self._batch_size = _get_in_out_shape(
x_shape, y_shape, n_classes, batch_size)
self.sample_fraction = self._batch_size / float(x_count)
self._input_dtype = _check_dtype(self._x.dtypes[0])
self._output_dtype = _check_dtype(self._y.dtypes[self._y_columns])
if random_state is None:
self.random_state = 66
else:
self.random_state = random_state
def get_feed_params(self):
"""Function returns a `dict` with data feed params while training.
Returns:
A `dict` with data feed params while training.
"""
return {'batch_size': self._batch_size}
def get_feed_dict_fn(self, input_placeholder, output_placeholder):
"""Returns a function, that will sample data and provide it to placeholders.
Args:
input_placeholder: tf.placeholder for input features mini batch.
output_placeholder: tf.placeholder for output labels.
Returns:
A function that when called samples a random subset of batch size
from x and y.
"""
def _feed_dict_fn():
"""Samples data and provides it to placeholders."""
# TODO(ipolosukhin): option for with/without replacement (dev version of
# dask)
sample = self.df.random_split(
[self.sample_fraction, 1 - self.sample_fraction],
random_state=self.random_state)
inp = extract_pandas_matrix(sample[0][self._x_columns].compute()).tolist()
out = extract_pandas_matrix(sample[0][self._y_columns].compute())
# convert to correct dtype
inp = np.array(inp, dtype=self._input_dtype)
# one-hot encode out for each class for cross entropy loss
if HAS_PANDAS:
import pandas as pd # pylint: disable=g-import-not-at-top
if not isinstance(out, pd.Series):
out = out.flatten()
out_max = self._y.max().compute().values[0]
encoded_out = np.zeros((out.size, out_max + 1), dtype=self._output_dtype)
encoded_out[np.arange(out.size), out] = 1
return {input_placeholder.name: inp, output_placeholder.name: encoded_out}
return _feed_dict_fn
| apache-2.0 |
analysiscenter/dataset | batchflow/opensets/imagenette.py | 1 | 5401 | """ Contains Imagenette and Imagewoof datasets """
import os
from os.path import dirname, basename
import tempfile
import logging
import urllib.request
import tarfile
from io import BytesIO
import PIL
import tqdm
import numpy as np
from sklearn.preprocessing import LabelEncoder
from . import ImagesOpenset
logger = logging.getLogger('SmallImagenet')
class Imagenette(ImagesOpenset):
""" Imagenette dataset.
Contains 12894 train and 500 test images. Total size 1.4GB.
Notes
-----
- Datasets contain both grayscale and colored images, ratio ~ 1:100
Argument `drop_grayscale` controls whether grayscale images should be dropped.
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette.tgz'
num_classes = 10
def __init__(self, *args, drop_grayscale=True, bar=False, preloaded=None, train_test=True, **kwargs):
self.bar = tqdm.tqdm(total=2) if bar else None
self.drop_grayscale = drop_grayscale
super().__init__(*args, preloaded=preloaded, train_test=train_test, **kwargs)
if self.bar:
self.bar.close()
def download(self, path=None):
""" Load data from website and extract it into numpy arrays """
def _image_class(filepath):
""" Image's class is determined by the parent folder of the image """
return basename(dirname(filepath))
def _is_train(filepath):
""" Whether image belongs to train or val parts can be determined by
the level 2 parent folder of the image
"""
return basename(dirname(dirname(filepath))) == 'train'
def _extract(archive, member):
data = archive.extractfile(member).read()
return PIL.Image.open(BytesIO(data))
def _is_file_rgb(archive, member):
""" Check whether archive member is a file.
In case `drop_grayscale` set to `True` it verifies that the member is the RGB mode image as well.
"""
if not self.drop_grayscale:
return member.isfile()
return member.isfile() and _extract(archive, member).mode == 'RGB'
def _gather_extracted(archive, files):
images = np.array([_extract(archive, file) for file in files], dtype=object)
labels = np.array([_image_class(file.name) for file in files])
labels_encoded = LabelEncoder().fit_transform(labels)
return images, labels_encoded
if path is None:
path = tempfile.gettempdir()
filename = os.path.basename(self.SOURCE_URL)
localname = os.path.join(path, filename)
if not os.path.isfile(localname):
logger.info("Downloading %s", filename)
urllib.request.urlretrieve(self.SOURCE_URL, localname)
logger.info("Downloaded %s", filename)
if self.bar:
self.bar.update(1)
logger.info("Extracting...")
with tarfile.open(localname, "r:gz") as archive:
files_in_archive = archive.getmembers()
train_files = [file for file in files_in_archive if _is_file_rgb(archive, file) and _is_train(file.name)]
train_data = _gather_extracted(archive, train_files)
test_files = [file for file in files_in_archive if _is_file_rgb(archive, file) and not _is_train(file.name)]
test_data = _gather_extracted(archive, test_files)
logger.info("Extracted")
if self.bar:
self.bar.update(1)
images = np.concatenate([train_data[0], test_data[0]])
labels = np.concatenate([train_data[1], test_data[1]])
preloaded = images, labels
train_len, test_len = len(train_data[0]), len(test_data[0])
index, train_index, test_index = self._infer_train_test_index(train_len, test_len)
return preloaded, index, train_index, test_index
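# Illustrative construction (the archive is downloaded into the system temp
# directory on first use; `bar=True` shows a two-step progress bar):
#
#   dataset = Imagenette(bar=True)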
class Imagenette320(Imagenette):
""" The '320px' version of Imagenette.
    The shortest side is resized to that size, with the aspect ratio maintained.
Contains 12894 train and 500 test images. Total size 325MB.
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette-320.tgz'
class Imagenette160(Imagenette):
""" The '160px' version of Imagenette.
    The shortest side is resized to that size, with the aspect ratio maintained.
Contains 12894 train and 500 test images. Total size 98MB.
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagenette-160.tgz'
class ImageWoof(Imagenette):
""" Imagewoof dataset. See the https://github.com/fastai/imagenette for details.
Contains 12454 train and 500 test images. Total size 1.3GB
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof.tgz'
class ImageWoof320(Imagenette):
""" The '320px' version of Imagewoof.
    The shortest side is resized to that size, with the aspect ratio maintained.
Contains 12454 train and 500 test images. Total size 313MB.
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof-320.tgz'
class ImageWoof160(Imagenette):
""" The '160px' version of Imagewoof.
    The shortest side is resized to that size, with the aspect ratio maintained.
Contains 12454 train and 500 test images. Total size 88MB
"""
SOURCE_URL = 'https://s3.amazonaws.com/fast-ai-imageclas/imagewoof-160.tgz'
| apache-2.0 |
udacity/ggplot | ggplot/geoms/geom_tile.py | 12 | 3695 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import pandas as pd
import numpy as np
from .geom import geom
from matplotlib.patches import Rectangle
import matplotlib.colors as colors
import matplotlib.colorbar as colorbar
class geom_tile(geom):
DEFAULT_AES = {}
REQUIRED_AES = {'x', 'y', 'fill'}
DEFAULT_PARAMS = {'stat': 'identity', 'position': 'identity'}
_aes_renames = {}
_units = set()
def _plot_unit(self, pinfo, ax):
x = pinfo.pop('x')
y = pinfo.pop('y')
fill = pinfo.pop('fill')
# TODO: Fix this hack!
# Currently, if the fill is specified in the ggplot aes wrapper, ggplot
# will assign colors without regard to the fill values. This is okay for
# categorical maps but not heatmaps. At this stage in the pipeline the
# geom can't recover the original values.
#
# However, if the fill is specified in the geom_tile aes wrapper, the
# original fill values are sent unaltered, so we can make a heat map
# with the values.
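        # Illustrative difference (hypothetical data frame `df` with columns
        # x, y and z):
        #   ggplot(aes(x='x', y='y', fill='z'), df) + geom_tile()     -> categorical colours
        #   ggplot(aes(x='x', y='y'), df) + geom_tile(aes(fill='z'))  -> continuous heat map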
# Was the fill specified in geom wrapper only? (i.e. not in ggplot)
if 'fill' in self.aes_unique_to_geom:
# Determine if there are non-numeric values.
if False in [isinstance(v, (int, long, float, complex)) for v in set(fill)]:
# No need to handle this case. Instruct the user to put categorical
# values in the ggplot wrapper.
raise Exception('For categorical fill values specify fill in the ggplot aes instead of the geom_tile aes.')
# All values are numeric so determine fill using colormap.
else:
fill_min = np.min(fill)
fill_max = np.max(fill)
if np.isnan(fill_min):
raise Exception('Fill values cannot contain NaN values.')
fill_rng = float(fill_max - fill_min)
fill_vals = (fill - fill_min) / fill_rng
cmap = self.gg.colormap(fill_vals.tolist())
fill = [colors.rgb2hex(c) for c in cmap[::, :3]]
df = pd.DataFrame(
{'x': x, 'y': y, 'fill': fill}).set_index(['x', 'y']).unstack(0)
# Setup axes.
x_ticks = range(2*len(set(x)) + 1)
y_ticks = range(2*len(set(y)) + 1)
x_indices = sorted(set(x))
y_indices = sorted(set(y))
# Setup box plotting parameters.
x_start = 0
y_start = 0
x_step = 2
y_step = 2
# Plot grid.
on_y = y_start
for yi in xrange(len(y_indices)):
on_x = x_start
for xi in xrange(len(x_indices)):
color = df.iloc[yi,xi]
if not isinstance(color, float):
ax.add_patch(Rectangle((on_x, on_y), x_step, y_step, facecolor=color))
on_x += x_step
on_y += y_step
# Draw the colorbar scale if drawing a heat map.
if 'cmap' in locals():
norm = colors.Normalize(vmin = fill_min, vmax = fill_max)
cax, kw = colorbar.make_axes(ax)
cax.hold(True)
colorbar.ColorbarBase(cax, cmap = self.gg.colormap, norm = norm)
# Set axis labels and ticks.
x_labels = ['']*(len(x_indices)+1)
for i,v in enumerate(x_indices): x_labels.insert(2*i+1, v)
y_labels = ['']*(len(y_indices)+1)
for i,v in enumerate(y_indices): y_labels.insert(2*i+1, v)
ax.set_xticklabels(x_labels)
ax.set_xticks(x_ticks)
ax.set_yticklabels(y_labels)
ax.set_yticks(y_ticks)
| bsd-2-clause |
UDST/choicemodels | tests/test_mnl_new.py | 1 | 3024 | """
These are tests for the refactored choicemodels MNL codebase.
"""
import numpy as np
import pandas as pd
import pytest
from pandas.testing import assert_frame_equal
from patsy import dmatrix
from choicemodels import MultinomialLogit
from choicemodels.tools import MergedChoiceTable
@pytest.fixture
def obs():
d1 = {'oid': np.arange(100),
'obsval': np.random.random(100),
'choice': np.random.choice(np.arange(5), size=100)}
return pd.DataFrame(d1).set_index('oid')
@pytest.fixture
def alts():
d2 = {'aid': np.arange(5),
'altval': np.random.random(5)}
return pd.DataFrame(d2).set_index('aid')
def test_mnl(obs, alts):
"""
Confirm that MNL estimation runs, using the native estimator.
"""
model_expression = 'obsval + altval - 1'
mct = MergedChoiceTable(obs, alts, 'choice')
m = MultinomialLogit(mct, model_expression)
print(m.fit())
def test_mnl_estimation(obs, alts):
"""
Confirm that estimated params from the new interface match urbansim.urbanchoice.
Only runs if the urbansim package has been installed.
"""
try:
from urbansim.urbanchoice.mnl import mnl_estimate
except:
print("Comparison of MNL estimation results skipped because urbansim is not installed")
return
model_expression = 'obsval + altval - 1'
mct = MergedChoiceTable(obs, alts, 'choice')
# new interface
m = MultinomialLogit(mct, model_expression)
r = m.fit().get_raw_results()
# old interface
dm = dmatrix(model_expression, mct.to_frame())
chosen = np.reshape(mct.to_frame()[mct.choice_col].values, (100, 5))
log_lik, fit = mnl_estimate(np.array(dm), chosen, numalts=5)
for k,v in log_lik.items():
assert(v == pytest.approx(r['log_likelihood'][k], 0.00001))
assert_frame_equal(fit, r['fit_parameters'][['Coefficient', 'Std. Error', 'T-Score']])
def test_mnl_prediction(obs, alts):
"""
Confirm that fitted probabilities in the new codebase match urbansim.urbanchoice.
Only runs if the urbansim package has been installed.
"""
try:
from urbansim.urbanchoice.mnl import mnl_simulate
except:
print("Comparison of MNL simulation results skipped because urbansim is not installed")
return
# produce a fitted model
mct = MergedChoiceTable(obs, alts, 'choice', 5)
m = MultinomialLogit(mct, model_expression='obsval + altval - 1')
results = m.fit()
# get predicted probabilities using choicemodels
probs1 = results.probabilities(mct)
# compare to probabilities from urbansim.urbanchoice
dm = dmatrix(results.model_expression, data=mct.to_frame(), return_type='dataframe')
probs = mnl_simulate(data=dm, coeff=results.fitted_parameters,
numalts=mct.sample_size, returnprobs=True)
df = mct.to_frame()
df['prob'] = probs.flatten()
probs2 = df.prob
pd.testing.assert_series_equal(probs1, probs2) | bsd-3-clause |
patricksnape/menpo | menpo/image/base.py | 2 | 131364 | from typing import Iterable, Optional
from warnings import warn
import PIL.Image as PILImage
import numpy as np
from menpo.base import MenpoDeprecationWarning, Vectorizable, copy_landmarks_and_path
from menpo.landmark import Landmarkable
from menpo.shape import PointCloud, bounding_box
from menpo.transform import (
AlignmentUniformScale,
Homogeneous,
NonUniformScale,
Rotation,
Translation,
scale_about_centre,
transform_about_centre,
)
from menpo.visualize.base import ImageViewer, LandmarkableViewable, Viewable
from .interpolation import scipy_interpolation
try:
from .interpolation import cv2_perspective_interpolation
except ImportError:
warn("Falling back to scipy interpolation for affine warps")
cv2_perspective_interpolation = None # type: ignore
from .patches import (
extract_patches_with_slice,
set_patches,
extract_patches_by_sampling,
)
# Cache the greyscale luminosity coefficients as they are invariant.
_greyscale_luminosity_coef: Optional[np.ndarray] = None
class ImageBoundaryError(ValueError):
r"""
Exception that is thrown when an attempt is made to crop an image beyond
    the edge of its boundary.
Parameters
----------
requested_min : ``(d,)`` `ndarray`
The per-dimension minimum index requested for the crop
requested_max : ``(d,)`` `ndarray`
The per-dimension maximum index requested for the crop
snapped_min : ``(d,)`` `ndarray`
The per-dimension minimum index that could be used if the crop was
constrained to the image boundaries.
    snapped_max : ``(d,)`` `ndarray`
The per-dimension maximum index that could be used if the crop was
constrained to the image boundaries.
"""
def __init__(self, requested_min, requested_max, snapped_min, snapped_max):
super(ImageBoundaryError, self).__init__()
self.requested_min = requested_min
self.requested_max = requested_max
self.snapped_min = snapped_min
self.snapped_max = snapped_max
def indices_for_image_of_shape(shape):
r"""
The indices of all pixels in an image with a given shape (without
channel information).
Parameters
----------
    shape : ``(n_dims,)`` `tuple` of `int`
The shape of the image.
Returns
-------
indices : `ndarray`
The indices of all the pixels in the image.
"""
return np.indices(shape).reshape([len(shape), -1]).T
def normalize_pixels_range(pixels, error_on_unknown_type=True):
r"""
Normalize the given pixels to the Menpo valid floating point range, [0, 1].
This is a single place to handle normalising pixels ranges. At the moment
the supported types are uint8 and uint16.
Parameters
----------
pixels : `ndarray`
The pixels to normalize in the floating point range.
error_on_unknown_type : `bool`, optional
If ``True``, this method throws a ``ValueError`` if the given pixels
array is an unknown type. If ``False``, this method performs no
operation.
Returns
-------
normalized_pixels : `ndarray`
The normalized pixels in the range [0, 1].
Raises
------
ValueError
If ``pixels`` is an unknown type and ``error_on_unknown_type==True``
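    Examples
    --------
    A minimal usage sketch (illustrative values only):
    >>> import numpy as np
    >>> p = np.array([0, 128, 255], dtype=np.uint8)
    >>> normalize_pixels_range(p).dtype
    dtype('float64')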
"""
dtype = pixels.dtype
if dtype == np.uint8:
max_range = 255.0
elif dtype == np.uint16:
max_range = 65535.0
else:
if error_on_unknown_type:
raise ValueError(
"Unexpected dtype ({}) - normalisation range "
"is unknown".format(dtype)
)
else:
# Do nothing
return pixels
# This multiplication is quite a bit faster than just dividing - will
# automatically cast it up to float64
return pixels * (1.0 / max_range)
def denormalize_pixels_range(pixels, out_dtype):
"""
Denormalize the given pixels array into the range of the given out dtype.
If the given pixels are floating point or boolean then the values
are scaled appropriately and cast to the output dtype. If the pixels
are already the correct dtype they are immediately returned.
Floating point pixels must be in the range [0, 1].
Currently uint8 and uint16 output dtypes are supported.
Parameters
----------
pixels : `ndarray`
The pixels to denormalize.
out_dtype : `np.dtype`
The numpy data type to output and scale the values into.
Returns
-------
out_pixels : `ndarray`
Will be in the correct range and will have type ``out_dtype``.
Raises
------
ValueError
Pixels are floating point and range outside [0, 1]
ValueError
Input pixels dtype not in the set {float32, float64, bool}.
ValueError
Output dtype not in the set {uint8, uint16}
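    Examples
    --------
    A minimal usage sketch (illustrative values only):
    >>> import numpy as np
    >>> p = np.array([0.0, 0.5, 1.0])
    >>> denormalize_pixels_range(p, np.uint8).dtype
    dtype('uint8')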
"""
in_dtype = pixels.dtype
if in_dtype == out_dtype:
return pixels
if np.issubclass_(in_dtype.type, np.floating) or in_dtype == float:
if np.issubclass_(out_dtype, np.floating) or out_dtype == float:
return pixels.astype(out_dtype)
else:
p_min = pixels.min()
p_max = pixels.max()
if p_min < 0.0 or p_max > 1.0:
raise ValueError(
"Unexpected input range [{}, {}] - pixels "
"must be in the range [0, 1]".format(p_min, p_max)
)
elif in_dtype != bool:
raise ValueError(
"Unexpected input dtype ({}) - only float32, float64 "
"and bool supported".format(in_dtype)
)
if out_dtype == np.uint8:
max_range = 255.0
elif out_dtype == np.uint16:
max_range = 65535.0
else:
raise ValueError(
"Unexpected output dtype ({}) - normalisation range "
"is unknown".format(out_dtype)
)
return (pixels * max_range).astype(out_dtype)
def channels_to_back(pixels):
r"""
Roll the channels from the front to the back for an image. If the image
that is passed is already a numpy array, then that is also fine.
Always returns a numpy array because our :map:`Image` containers do not
support channels at the back.
Parameters
----------
image : `ndarray`
The pixels or image to roll the channel back for.
Returns
-------
rolled_pixels : `ndarray`
The numpy array of pixels with the channels on the last axis.
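    Examples
    --------
    A minimal usage sketch (illustrative shape only):
    >>> import numpy as np
    >>> pixels = np.zeros((3, 48, 64))
    >>> channels_to_back(pixels).shape
    (48, 64, 3)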
"""
return np.require(
np.rollaxis(pixels, 0, pixels.ndim), dtype=pixels.dtype, requirements=["C"]
)
def channels_to_front(pixels):
r"""
Convert the given pixels array (channels assumed to be at the last axis
as is common in other imaging packages) into a numpy array.
Parameters
----------
pixels : ``(H, W, C)`` `buffer`
The pixels to convert to the Menpo channels at axis 0.
Returns
-------
pixels : ``(C, H, W)`` `ndarray`
Numpy array, channels as axis 0.
"""
if not isinstance(pixels, np.ndarray):
pixels = np.array(pixels)
return np.require(np.rollaxis(pixels, -1), dtype=pixels.dtype, requirements=["C"])
class Image(Vectorizable, Landmarkable, Viewable, LandmarkableViewable):
r"""
An n-dimensional image.
Images are n-dimensional homogeneous regular arrays of data. Each
spatially distinct location in the array is referred to as a `pixel`.
At a pixel, ``k`` distinct pieces of information can be stored. Each
datum at a pixel is refereed to as being in a `channel`. All pixels in
the image have the same number of channels, and all channels have the
same data-type (`float64`).
Parameters
----------
image_data : ``(C, M, N ..., Q)`` `ndarray`
Array representing the image pixels, with the first axis being
channels.
copy : `bool`, optional
If ``False``, the ``image_data`` will not be copied on assignment.
Note that this will miss out on additional checks. Further note that we
still demand that the array is C-contiguous - if it isn't, a copy will
be generated anyway.
In general, this should only be used if you know what you are doing.
Raises
------
Warning
If ``copy=False`` cannot be honoured
ValueError
If the pixel array is malformed
"""
def __init__(self, image_data, copy=True):
super(Image, self).__init__()
if not copy:
if not image_data.flags.c_contiguous:
image_data = np.array(image_data, copy=True, order="C")
warn(
"The copy flag was NOT honoured. A copy HAS been made. "
"Please ensure the data you pass is C-contiguous."
)
else:
image_data = np.array(image_data, copy=True, order="C")
# Degenerate case whereby we can just put the extra axis
# on ourselves
if image_data.ndim == 2:
# Ensures that the data STAYS C-contiguous
image_data = image_data.reshape((1,) + image_data.shape)
if image_data.ndim < 2:
raise ValueError(
"Pixel array has to be 2D (implicitly 1 channel, "
"2D shape) or 3D+ (n_channels, 2D+ shape) "
" - a {}D array "
"was provided".format(image_data.ndim)
)
self.pixels = image_data
@classmethod
def init_blank(cls, shape, n_channels=1, fill=0, dtype=float):
r"""
Returns a blank image.
Parameters
----------
shape : `tuple` or `list`
The shape of the image. Any floating point values are rounded up
to the nearest integer.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
Returns
-------
blank_image : :map:`Image`
A new image of the requested size.
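        Examples
        --------
        A minimal usage sketch (illustrative values only):
        >>> from menpo.image import Image
        >>> img = Image.init_blank((120, 240), n_channels=3, fill=0.5)
        >>> img.shape
        (120, 240)
        >>> img.n_channels
        3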
"""
# Ensure that the '+' operator means concatenate tuples
shape = tuple(np.ceil(shape).astype(int))
if fill == 0:
pixels = np.zeros((n_channels,) + shape, dtype=dtype)
else:
pixels = np.ones((n_channels,) + shape, dtype=dtype) * fill
# We know there is no need to copy...
return cls(pixels, copy=False)
@classmethod
def init_from_rolled_channels(cls, pixels):
r"""
Deprecated - please use the equivalent ``init_from_channels_at_back`` method.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .init_from_channels_at_back instead.",
MenpoDeprecationWarning,
)
return cls.init_from_channels_at_back(pixels)
@classmethod
def init_from_channels_at_back(cls, pixels):
r"""
Create an Image from a set of pixels where the channels axis is on
the last axis (the back). This is common in other frameworks, and
therefore this method provides a convenient means of creating a menpo
Image from such data. Note that a copy is always created due to the
need to rearrange the data.
Parameters
----------
pixels : ``(M, N ..., Q, C)`` `ndarray`
Array representing the image pixels, with the last axis being
channels.
Returns
-------
image : :map:`Image`
A new image from the given pixels, with the FIRST axis as the
channels.
Raises
------
ValueError
If image is not at least 2D, i.e. has at least 2 dimensions plus
the channels in the end.
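        Examples
        --------
        A minimal usage sketch, assuming channels-last ``uint8`` data:
        >>> import numpy as np
        >>> from menpo.image import Image
        >>> rgb = np.zeros((48, 64, 3), dtype=np.uint8)
        >>> Image.init_from_channels_at_back(rgb).pixels.shape
        (3, 48, 64)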
"""
if pixels.ndim == 2:
pixels = pixels[..., None]
if pixels.ndim < 2:
raise ValueError(
"Pixel array has to be 2D "
"(2D shape, implicitly 1 channel) "
"or 3D+ (2D+ shape, n_channels) "
" - a {}D array "
"was provided".format(pixels.ndim)
)
return cls(channels_to_front(pixels))
@classmethod
def init_from_pointcloud(
cls,
pointcloud,
group=None,
boundary=0,
n_channels=1,
fill=0,
dtype=float,
return_transform=False,
):
r"""
Create an Image that is big enough to contain the given pointcloud.
The pointcloud will be translated to the origin and then translated
according to its bounds in order to fit inside the new image.
An optional boundary can be provided in order to increase the space
around the boundary of the pointcloud. The boundary will be added
to *all sides of the image* and so a boundary of 5 provides 10 pixels
of boundary total for each dimension.
Parameters
----------
pointcloud : :map:`PointCloud`
Pointcloud to place inside the newly created image.
group : `str`, optional
If ``None``, the pointcloud will only be used to create the image.
If a `str` then the pointcloud will be attached as a landmark
group to the image, with the given string as key.
boundary : `float`
A optional padding distance that is added to the pointcloud bounds.
Default is ``0``, meaning the max/min of tightest possible
containing image is returned.
n_channels : `int`, optional
The number of channels to create the image with.
fill : `int`, optional
The value to fill all pixels with.
dtype : numpy data type, optional
The data type of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
adjust the PointCloud in order to build the image, is returned.
Returns
-------
image : ``type(cls)`` Image or subclass
A new image with the same size as the given pointcloud, optionally
with the pointcloud attached as landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
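        Examples
        --------
        A minimal usage sketch (illustrative coordinates only):
        >>> import numpy as np
        >>> from menpo.image import Image
        >>> from menpo.shape import PointCloud
        >>> pc = PointCloud(np.array([[10.0, 10.0], [40.0, 60.0]]))
        >>> Image.init_from_pointcloud(pc, boundary=5).shape
        (40, 60)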
"""
# Translate pointcloud to the origin
minimum = pointcloud.bounds(boundary=boundary)[0]
tr = Translation(-minimum)
origin_pc = tr.apply(pointcloud)
image_shape = origin_pc.range(boundary=boundary)
new_image = cls.init_blank(
image_shape, n_channels=n_channels, fill=fill, dtype=dtype
)
if group is not None:
new_image.landmarks[group] = origin_pc
if return_transform:
return new_image, tr
else:
return new_image
def as_masked(self, mask=None, copy=True):
r"""
Return a copy of this image with an attached mask behavior.
A custom mask may be provided, or ``None``. See the :map:`MaskedImage`
constructor for details of how the kwargs will be handled.
Parameters
----------
mask : ``(self.shape)`` `ndarray` or :map:`BooleanImage`
A mask to attach to the newly generated masked image.
copy : `bool`, optional
If ``False``, the produced :map:`MaskedImage` will share pixels with
``self``. Only suggested to be used for performance.
Returns
-------
masked_image : :map:`MaskedImage`
An image with the same pixels and landmarks as this one, but with
a mask.
"""
from menpo.image import MaskedImage
return copy_landmarks_and_path(
self, MaskedImage(self.pixels, mask=mask, copy=copy)
)
@property
def n_dims(self):
r"""
The number of dimensions in the image. The minimum possible ``n_dims``
is 2.
:type: `int`
"""
return len(self.shape)
@property
def n_pixels(self):
r"""
Total number of pixels in the image ``(prod(shape),)``
:type: `int`
"""
return self.pixels[0, ...].size
@property
def n_elements(self):
r"""
Total number of data points in the image
``(prod(shape), n_channels)``
:type: `int`
"""
return self.pixels.size
@property
def n_channels(self):
"""
The number of channels on each pixel in the image.
:type: `int`
"""
return self.pixels.shape[0]
@property
def width(self):
r"""
The width of the image.
This is the width according to image semantics, and is thus the size
of the **last** dimension.
:type: `int`
"""
return self.pixels.shape[-1]
@property
def height(self):
r"""
The height of the image.
This is the height according to image semantics, and is thus the size
of the **second to last** dimension.
:type: `int`
"""
return self.pixels.shape[-2]
@property
def shape(self):
r"""
The shape of the image
(with ``n_channel`` values at each point).
:type: `tuple`
"""
return self.pixels.shape[1:]
def bounds(self):
r"""
The bounds of the image, minimum is always (0, 0). The maximum is
the maximum **index** that can be used to index into the image for each
dimension. Therefore, bounds will be of the form:
((0, 0), (self.height - 1, self.width - 1)) for a 2D image.
Note that this is akin to supporting a nearest neighbour interpolation.
Although the *actual* maximum subpixel value would be something
like ``self.height - eps`` where ``eps`` is some value arbitrarily
close to 0, this value at least allows sampling without worrying about
floating point error.
:type: `tuple`
"""
return (0,) * self.n_dims, tuple(s - 1 for s in self.shape)
def diagonal(self):
r"""
The diagonal size of this image
:type: `float`
"""
return np.sqrt(np.sum(np.array(self.shape) ** 2))
def centre(self):
r"""
The geometric centre of the Image - the subpixel that is in the
middle.
Useful for aligning shapes and images.
:type: (``n_dims``,) `ndarray`
"""
return np.array(self.shape, dtype=np.double) / 2
def _str_shape(self):
if self.n_dims > 2:
return " x ".join(str(dim) for dim in self.shape)
elif self.n_dims == 2:
return "{}W x {}H".format(self.width, self.height)
def indices(self):
r"""
Return the indices of all pixels in this image.
        :type: (``n_pixels``, ``n_dims``) `ndarray`
"""
return indices_for_image_of_shape(self.shape)
def _as_vector(self, keep_channels=False):
r"""
The vectorized form of this image.
Parameters
----------
keep_channels : `bool`, optional
========== =============================
Value Return shape
========== =============================
`False` ``(n_channels * n_pixels,)``
`True` ``(n_channels, n_pixels)``
========== =============================
Returns
-------
vec : (See ``keep_channels`` above) `ndarray`
Flattened representation of this image, containing all pixel
and channel information.
"""
if keep_channels:
return self.pixels.reshape([self.n_channels, -1])
else:
return self.pixels.ravel()
def from_vector(self, vector, n_channels=None, copy=True):
r"""
Takes a flattened vector and returns a new image formed by reshaping
the vector to the correct pixels and channels.
The `n_channels` argument is useful for when we want to add an extra
channel to an image but maintain the shape. For example, when
calculating the gradient.
Note that landmarks are transferred in the process.
Parameters
----------
vector : ``(n_parameters,)`` `ndarray`
A flattened vector of all pixels and channels of an image.
n_channels : `int`, optional
If given, will assume that vector is the same shape as this image,
but with a possibly different number of channels.
copy : `bool`, optional
If ``False``, the vector will not be copied in creating the new
image.
Returns
-------
image : :map:`Image`
New image of same shape as this image and the number of
specified channels.
Raises
------
Warning
If the ``copy=False`` flag cannot be honored
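        Examples
        --------
        A minimal usage sketch (illustrative values only):
        >>> from menpo.image import Image
        >>> img = Image.init_blank((4, 4), n_channels=2)
        >>> img.from_vector(img.as_vector() * 2.0).shape
        (4, 4)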
"""
# This is useful for when we want to add an extra channel to an image
# but maintain the shape. For example, when calculating the gradient
n_channels = self.n_channels if n_channels is None else n_channels
image_data = vector.reshape((n_channels,) + self.shape)
new_image = Image(image_data, copy=copy)
new_image.landmarks = self.landmarks
return new_image
def _from_vector_inplace(self, vector, copy=True):
r"""
Takes a flattened vector and update this image by
reshaping the vector to the correct dimensions.
Parameters
----------
vector : ``(n_pixels,)`` `bool ndarray`
            A flattened vector of all the pixels of a :map:`BooleanImage`.
copy: `bool`, optional
If ``False``, the vector will be set as the pixels. If ``True``, a
copy of the vector is taken.
Raises
------
Warning
If ``copy=False`` flag cannot be honored
Note
----
For :map:`BooleanImage` this is rebuilding a boolean image **itself**
from boolean values. The mask is in no way interpreted in performing
the operation, in contrast to :map:`MaskedImage`, where only the masked
region is used in :meth:`from_vector_inplace` and :meth:`as_vector`.
"""
image_data = vector.reshape(self.pixels.shape)
if not copy:
if not image_data.flags.c_contiguous:
warn(
"The copy flag was NOT honoured. A copy HAS been made. "
"Please ensure the data you pass is C-contiguous."
)
image_data = np.array(
image_data, copy=True, order="C", dtype=image_data.dtype
)
else:
image_data = np.array(
image_data, copy=True, order="C", dtype=image_data.dtype
)
self.pixels = image_data
def extract_channels(self, channels):
r"""
A copy of this image with only the specified channels.
Parameters
----------
channels : `int` or `[int]`
The channel index or `list` of channel indices to retain.
Returns
-------
image : `type(self)`
A copy of this image with only the channels requested.
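        Examples
        --------
        A minimal usage sketch (illustrative values only):
        >>> from menpo.image import Image
        >>> img = Image.init_blank((10, 10), n_channels=3)
        >>> img.extract_channels(0).n_channels
        1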
"""
copy = self.copy()
if not isinstance(channels, list):
channels = [channels] # ensure we don't remove the channel axis
copy.pixels = self.pixels[channels]
return copy
def as_histogram(self, keep_channels=True, bins="unique"):
r"""
Histogram binning of the values of this image.
Parameters
----------
keep_channels : `bool`, optional
If set to ``False``, it returns a single histogram for all the
channels of the image. If set to ``True``, it returns a `list` of
histograms, one for each channel.
bins : ``{unique}``, positive `int` or sequence of scalars, optional
If set equal to ``'unique'``, the bins of the histograms are centred
on the unique values of each channel. If set equal to a positive
`int`, then this is the number of bins. If set equal to a
sequence of scalars, these will be used as bins centres.
Returns
-------
hist : `ndarray` or `list` with ``n_channels`` `ndarrays` inside
The histogram(s). If ``keep_channels=False``, then hist is an
`ndarray`. If ``keep_channels=True``, then hist is a `list` with
``len(hist)=n_channels``.
bin_edges : `ndarray` or `list` with `n_channels` `ndarrays` inside
An array or a list of arrays corresponding to the above histograms
that store the bins' edges.
Raises
------
ValueError
Bins can be either 'unique', positive int or a sequence of scalars.
Examples
--------
Visualizing the histogram when a list of array bin edges is provided:
>>> hist, bin_edges = image.as_histogram()
>>> for k in range(len(hist)):
>>> plt.subplot(1,len(hist),k)
>>> width = 0.7 * (bin_edges[k][1] - bin_edges[k][0])
>>> centre = (bin_edges[k][:-1] + bin_edges[k][1:]) / 2
>>> plt.bar(centre, hist[k], align='center', width=width)
"""
# parse options
if isinstance(bins, str):
if bins == "unique":
bins = 0
else:
raise ValueError(
"Bins can be either 'unique', positive int or"
"a sequence of scalars."
)
elif isinstance(bins, int) and bins < 1:
raise ValueError(
"Bins can be either 'unique', positive int or a " "sequence of scalars."
)
# compute histogram
vec = self.as_vector(keep_channels=keep_channels)
if len(vec.shape) == 1 or vec.shape[0] == 1:
if bins == 0:
bins = np.unique(vec)
hist, bin_edges = np.histogram(vec, bins=bins)
else:
hist = []
bin_edges = []
num_bins = bins
for ch in range(vec.shape[0]):
if bins == 0:
num_bins = np.unique(vec[ch, :])
h_tmp, c_tmp = np.histogram(vec[ch, :], bins=num_bins)
hist.append(h_tmp)
bin_edges.append(c_tmp)
return hist, bin_edges
def _view_2d(
self,
figure_id=None,
new_figure=False,
channels=None,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
r"""
View the image using the default image viewer. This method will appear
on the Image as ``view`` if the Image is 2D.
        Parameters
        ----------
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated.
Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36,
hanning, hamming, hermite, kaiser, quadric, catrom, gaussian,
bessel, mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes.
Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : {``normal``, ``italic``, ``oblique``}, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None``, optional
The size of the figure in inches.
Returns
-------
viewer : `ImageViewer`
The image viewing object.
"""
return ImageViewer(
figure_id, new_figure, self.n_dims, self.pixels, channels=channels
).render(
interpolation=interpolation,
cmap_name=cmap_name,
alpha=alpha,
render_axes=render_axes,
axes_font_name=axes_font_name,
axes_font_size=axes_font_size,
axes_font_style=axes_font_style,
axes_font_weight=axes_font_weight,
axes_x_limits=axes_x_limits,
axes_y_limits=axes_y_limits,
axes_x_ticks=axes_x_ticks,
axes_y_ticks=axes_y_ticks,
figure_size=figure_size,
)
def _view_landmarks_2d(
self,
channels=None,
group=None,
with_labels=None,
without_labels=None,
figure_id=None,
new_figure=False,
interpolation="bilinear",
cmap_name=None,
alpha=1.0,
render_lines=True,
line_colour=None,
line_style="-",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=5,
marker_face_colour=None,
marker_edge_colour=None,
marker_edge_width=1.0,
render_numbering=False,
numbers_horizontal_align="center",
numbers_vertical_align="bottom",
numbers_font_name="sans-serif",
numbers_font_size=10,
numbers_font_style="normal",
numbers_font_weight="normal",
numbers_font_colour="k",
render_legend=False,
legend_title="",
legend_font_name="sans-serif",
legend_font_style="normal",
legend_font_size=10,
legend_font_weight="normal",
legend_marker_scale=None,
legend_location=2,
legend_bbox_to_anchor=(1.05, 1.0),
legend_border_axes_pad=None,
legend_n_columns=1,
legend_horizontal_spacing=None,
legend_vertical_spacing=None,
legend_border=True,
legend_border_padding=None,
legend_shadow=False,
legend_rounded_corners=False,
render_axes=False,
axes_font_name="sans-serif",
axes_font_size=10,
axes_font_style="normal",
axes_font_weight="normal",
axes_x_limits=None,
axes_y_limits=None,
axes_x_ticks=None,
axes_y_ticks=None,
figure_size=(7, 7),
):
"""
Visualize the landmarks. This method will appear on the Image as
``view_landmarks`` if the Image is 2D.
Parameters
----------
channels : `int` or `list` of `int` or ``all`` or ``None``
If `int` or `list` of `int`, the specified channel(s) will be
rendered. If ``all``, all the channels will be rendered in subplots.
If ``None`` and the image is RGB, it will be rendered in RGB mode.
If ``None`` and the image is not RGB, it is equivalent to ``all``.
        group : `str` or ``None``, optional
The landmark group to be visualized. If ``None`` and there are more
than one landmark groups, an error is raised.
with_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, only show the given label(s). Should **not** be
used with the ``without_labels`` kwarg.
without_labels : ``None`` or `str` or `list` of `str`, optional
If not ``None``, show all except the given label(s). Should **not**
be used with the ``with_labels`` kwarg.
figure_id : `object`, optional
The id of the figure to be used.
new_figure : `bool`, optional
If ``True``, a new figure is created.
interpolation : See Below, optional
The interpolation used to render the image. For example, if
``bilinear``, the image will be smooth and if ``nearest``, the
image will be pixelated. Example options ::
{none, nearest, bilinear, bicubic, spline16, spline36, hanning,
hamming, hermite, kaiser, quadric, catrom, gaussian, bessel,
mitchell, sinc, lanczos}
cmap_name: `str`, optional,
If ``None``, single channel and three channel images default
to greyscale and rgb colormaps respectively.
alpha : `float`, optional
The alpha blending value, between 0 (transparent) and 1 (opaque).
render_lines : `bool`, optional
If ``True``, the edges will be rendered.
line_colour : See Below, optional
The colour of the lines.
Example options::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
line_style : ``{-, --, -., :}``, optional
The style of the lines.
line_width : `float`, optional
The width of the lines.
render_markers : `bool`, optional
If ``True``, the markers will be rendered.
marker_style : See Below, optional
The style of the markers. Example options ::
{., ,, o, v, ^, <, >, +, x, D, d, s, p, *, h, H, 1, 2, 3, 4, 8}
marker_size : `int`, optional
The size of the markers in points.
marker_face_colour : See Below, optional
The face (filling) colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_colour : See Below, optional
The edge colour of the markers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
marker_edge_width : `float`, optional
The width of the markers' edge.
render_numbering : `bool`, optional
If ``True``, the landmarks will be numbered.
numbers_horizontal_align : ``{center, right, left}``, optional
The horizontal alignment of the numbers' texts.
numbers_vertical_align : ``{center, top, bottom, baseline}``, optional
The vertical alignment of the numbers' texts.
numbers_font_name : See Below, optional
The font of the numbers. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
numbers_font_size : `int`, optional
The font size of the numbers.
numbers_font_style : ``{normal, italic, oblique}``, optional
The font style of the numbers.
numbers_font_weight : See Below, optional
The font weight of the numbers.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
numbers_font_colour : See Below, optional
The font colour of the numbers.
Example options ::
{r, g, b, c, m, k, w}
or
(3, ) ndarray
render_legend : `bool`, optional
If ``True``, the legend will be rendered.
legend_title : `str`, optional
The title of the legend.
legend_font_name : See below, optional
The font of the legend. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
legend_font_style : ``{normal, italic, oblique}``, optional
The font style of the legend.
legend_font_size : `int`, optional
The font size of the legend.
legend_font_weight : See Below, optional
The font weight of the legend.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold, demibold, demi, bold, heavy, extra bold, black}
legend_marker_scale : `float`, optional
The relative size of the legend markers with respect to the original
legend_location : `int`, optional
The location of the legend. The predefined values are:
=============== ==
'best' 0
'upper right' 1
'upper left' 2
'lower left' 3
'lower right' 4
'right' 5
'center left' 6
'center right' 7
'lower center' 8
'upper center' 9
'center' 10
=============== ==
legend_bbox_to_anchor : (`float`, `float`) `tuple`, optional
The bbox that the legend will be anchored.
legend_border_axes_pad : `float`, optional
The pad between the axes and legend border.
legend_n_columns : `int`, optional
The number of the legend's columns.
legend_horizontal_spacing : `float`, optional
The spacing between the columns.
legend_vertical_spacing : `float`, optional
The vertical space between the legend entries.
legend_border : `bool`, optional
If ``True``, a frame will be drawn around the legend.
legend_border_padding : `float`, optional
The fractional whitespace inside the legend border.
legend_shadow : `bool`, optional
If ``True``, a shadow will be drawn behind legend.
legend_rounded_corners : `bool`, optional
If ``True``, the frame's corners will be rounded (fancybox).
render_axes : `bool`, optional
If ``True``, the axes will be rendered.
axes_font_name : See Below, optional
The font of the axes. Example options ::
{serif, sans-serif, cursive, fantasy, monospace}
axes_font_size : `int`, optional
The font size of the axes.
axes_font_style : ``{normal, italic, oblique}``, optional
The font style of the axes.
axes_font_weight : See Below, optional
The font weight of the axes.
Example options ::
{ultralight, light, normal, regular, book, medium, roman,
semibold,demibold, demi, bold, heavy, extra bold, black}
axes_x_limits : `float` or (`float`, `float`) or ``None``, optional
The limits of the x axis. If `float`, then it sets padding on the
right and left of the Image as a percentage of the Image's width. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_y_limits : (`float`, `float`) `tuple` or ``None``, optional
The limits of the y axis. If `float`, then it sets padding on the
top and bottom of the Image as a percentage of the Image's height. If
`tuple` or `list`, then it defines the axis limits. If ``None``, then
the limits are set automatically.
axes_x_ticks : `list` or `tuple` or ``None``, optional
The ticks of the x axis.
axes_y_ticks : `list` or `tuple` or ``None``, optional
The ticks of the y axis.
figure_size : (`float`, `float`) `tuple` or ``None`` optional
The size of the figure in inches.
Raises
------
ValueError
If both ``with_labels`` and ``without_labels`` are passed.
ValueError
If the landmark manager doesn't contain the provided group label.
"""
from menpo.visualize import view_image_landmarks
return view_image_landmarks(
self,
channels,
False,
group,
with_labels,
without_labels,
figure_id,
new_figure,
interpolation,
cmap_name,
alpha,
render_lines,
line_colour,
line_style,
line_width,
render_markers,
marker_style,
marker_size,
marker_face_colour,
marker_edge_colour,
marker_edge_width,
render_numbering,
numbers_horizontal_align,
numbers_vertical_align,
numbers_font_name,
numbers_font_size,
numbers_font_style,
numbers_font_weight,
numbers_font_colour,
render_legend,
legend_title,
legend_font_name,
legend_font_style,
legend_font_size,
legend_font_weight,
legend_marker_scale,
legend_location,
legend_bbox_to_anchor,
legend_border_axes_pad,
legend_n_columns,
legend_horizontal_spacing,
legend_vertical_spacing,
legend_border,
legend_border_padding,
legend_shadow,
legend_rounded_corners,
render_axes,
axes_font_name,
axes_font_size,
axes_font_style,
axes_font_weight,
axes_x_limits,
axes_y_limits,
axes_x_ticks,
axes_y_ticks,
figure_size,
)
def crop(
self,
min_indices,
max_indices,
constrain_to_boundary=False,
return_transform=False,
):
r"""
Return a cropped copy of this image using the given minimum and
maximum indices. Landmarks are correctly adjusted so they maintain
their position relative to the newly cropped image.
Parameters
----------
min_indices : ``(n_dims,)`` `ndarray`
The minimum index over each dimension.
max_indices : ``(n_dims,)`` `ndarray`
The maximum index over each dimension.
constrain_to_boundary : `bool`, optional
            If ``True`` the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
cropped_image : `type(self)`
A new instance of self, but cropped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
``min_indices`` and ``max_indices`` both have to be of length
``n_dims``. All ``max_indices`` must be greater than
``min_indices``.
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
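        Examples
        --------
        A minimal usage sketch (illustrative indices only):
        >>> from menpo.image import Image
        >>> img = Image.init_blank((100, 100))
        >>> img.crop([10, 10], [60, 80]).shape
        (50, 70)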
"""
min_indices = np.floor(min_indices)
max_indices = np.ceil(max_indices)
if not (min_indices.size == max_indices.size == self.n_dims):
raise ValueError(
"Both min and max indices should be 1D numpy arrays of"
" length n_dims ({})".format(self.n_dims)
)
elif not np.all(max_indices > min_indices):
raise ValueError("All max indices must be greater that the min " "indices")
min_bounded = self.constrain_points_to_bounds(min_indices)
max_bounded = self.constrain_points_to_bounds(max_indices)
        all_min_bounded = np.all(min_bounded == min_indices)
        all_max_bounded = np.all(max_bounded == max_indices)
        if not (constrain_to_boundary or (all_min_bounded and all_max_bounded)):
# points have been constrained and the user didn't want this -
raise ImageBoundaryError(min_indices, max_indices, min_bounded, max_bounded)
new_shape = (max_bounded - min_bounded).astype(int)
return self.warp_to_shape(
new_shape,
Translation(min_bounded),
order=0,
warp_landmarks=True,
return_transform=return_transform,
)
def crop_to_pointcloud(
self, pointcloud, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with an optional ``n_pixel`` boundary.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
            If ``True`` the crop will be snapped to not go beyond this image's
            boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the bounds of the pointcloud.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
min_indices, max_indices = pointcloud.bounds(boundary=boundary)
return self.crop(
min_indices,
max_indices,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks(
self, group=None, boundary=0, constrain_to_boundary=True, return_transform=False
):
r"""
Return a copy of this image cropped so that it is bounded around a set
of landmarks with an optional ``n_pixel`` boundary
Parameters
----------
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
boundary : `int`, optional
An extra padding to be added all around the landmarks bounds.
constrain_to_boundary : `bool`, optional
            If ``True`` the crop will be snapped to not go beyond this image's
            boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to its landmarks.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud(
pc,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_pointcloud_proportion(
self,
pointcloud,
boundary_proportion,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Return a copy of this image cropped so that it is bounded around a
pointcloud with a border proportional to the pointcloud spread or range.
Parameters
----------
pointcloud : :map:`PointCloud`
The pointcloud to crop around.
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the pointclouds' per-dimension range; if ``False`` w.r.t.
the maximum value of the pointclouds' per-dimension range.
constrain_to_boundary : `bool`, optional
            If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
A copy of this image cropped to the border proportional to
the pointcloud spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
if minimum:
boundary = boundary_proportion * np.min(pointcloud.range())
else:
boundary = boundary_proportion * np.max(pointcloud.range())
return self.crop_to_pointcloud(
pointcloud,
boundary=boundary,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def crop_to_landmarks_proportion(
self,
boundary_proportion,
group=None,
minimum=True,
constrain_to_boundary=True,
return_transform=False,
):
r"""
Crop this image to be bounded around a set of landmarks with a
border proportional to the landmark spread or range.
Parameters
----------
boundary_proportion : `float`
Additional padding to be added all around the landmarks
bounds defined as a proportion of the landmarks range. See
the minimum parameter for a definition of how the range is
calculated.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
minimum : `bool`, optional
If ``True`` the specified proportion is relative to the minimum
value of the landmarks' per-dimension range; if ``False`` w.r.t. the
maximum value of the landmarks' per-dimension range.
constrain_to_boundary : `bool`, optional
            If ``True``, the crop will be snapped to not go beyond this image's
boundary. If ``False``, an :map:`ImageBoundaryError` will be raised
if an attempt is made to go beyond the edge of the image.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the cropping is also returned.
Returns
-------
image : :map:`Image`
This image, cropped to its landmarks with a border proportional to
the landmark spread or range.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ImageBoundaryError
Raised if ``constrain_to_boundary=False``, and an attempt is made
to crop the image in a way that violates the image bounds.
"""
pc = self.landmarks[group]
return self.crop_to_pointcloud_proportion(
pc,
boundary_proportion,
minimum=minimum,
constrain_to_boundary=constrain_to_boundary,
return_transform=return_transform,
)
def constrain_points_to_bounds(self, points):
r"""
Constrains the points provided to be within the bounds of this image.
Parameters
----------
points : ``(d,)`` `ndarray`
Points to be snapped to the image boundaries.
Returns
-------
bounded_points : ``(d,)`` `ndarray`
Points snapped to not stray outside the image edges.
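        Examples
        --------
        A minimal sketch; note that values above the image snap to
        ``self.shape`` rather than ``self.shape - 1``:
        >>> import numpy as np
        >>> from menpo.image import Image
        >>> img = Image.init_blank((50, 50))
        >>> img.constrain_points_to_bounds(np.array([-3.0, 60.0])).tolist()
        [0.0, 50.0]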
"""
bounded_points = points.copy()
# check we don't stray under any edges
bounded_points[bounded_points < 0] = 0
# check we don't stray over any edges
shape = np.array(self.shape)
over_image = (shape - bounded_points) < 0
bounded_points[over_image] = shape[over_image]
return bounded_points
def extract_patches(
self,
patch_centers,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
order=0,
mode="constant",
cval=0.0,
):
r"""
Extract a set of patches from an image. Given a set of patch centers
and a patch size, patches are extracted from within the image, centred
on the given coordinates. Sample offsets denote a set of offsets to
extract from within a patch. This is very useful if you want to extract
a dense set of features around a set of landmarks and simply sample the
same grid of patches around the landmarks.
If sample offsets are used, to access the offsets for each patch you
need to slice the resulting `list`. So for 2 offsets, the first centers
offset patches would be ``patches[:2]``.
Currently only 2D images are supported.
Note that the default is nearest neighbour sampling for the patches
which is achieved via slicing and is much more efficient than using
sampling/interpolation. Note that a significant performance decrease
will be measured if the ``order`` or ``mode`` parameters are modified
from ``order = 0`` and ``mode = 'constant'`` as internally sampling
will be used rather than slicing.
Parameters
----------
patch_centers : :map:`PointCloud`
The centers to extract patches around.
patch_shape : ``(1, n_dims)`` `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according to
the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside the
image boundaries.
Returns
-------
patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` of :map:`Image` objects if
            ``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
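        Examples
        --------
        A minimal usage sketch (illustrative centres; the output shape follows
        the ``as_single_array`` convention described above):
        >>> import numpy as np
        >>> from menpo.image import Image
        >>> from menpo.shape import PointCloud
        >>> img = Image.init_blank((100, 100), n_channels=3)
        >>> centres = PointCloud(np.array([[50.0, 50.0], [25.0, 75.0]]))
        >>> img.extract_patches(centres, patch_shape=(16, 16)).shape
        (2, 1, 3, 16, 16)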
"""
if self.n_dims != 2:
raise ValueError(
"Only two dimensional patch extraction is " "currently supported."
)
if order == 0 and mode == "constant":
# Fast path using slicing
single_array = extract_patches_with_slice(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
cval=cval,
)
else:
single_array = extract_patches_by_sampling(
self.pixels,
patch_centers.points,
patch_shape,
offsets=sample_offsets,
order=order,
mode=mode,
cval=cval,
)
if as_single_array:
return single_array
else:
return [Image(o, copy=False) for p in single_array for o in p]
def extract_patches_around_landmarks(
self,
group=None,
patch_shape=(16, 16),
sample_offsets=None,
as_single_array=True,
):
r"""
Extract patches around landmarks existing on this image. Provided the
group label and optionally the landmark label extract a set of patches.
See `extract_patches` for more information.
Currently only 2D images are supported.
Parameters
----------
group : `str` or ``None``, optional
The landmark group to use as patch centres.
patch_shape : `tuple` or `ndarray`, optional
The size of the patch to extract
sample_offsets : ``(n_offsets, n_dims)`` `ndarray` or ``None``, optional
The offsets to sample from within a patch. So ``(0, 0)`` is the
centre of the patch (no offset) and ``(1, 0)`` would be sampling the
patch from 1 pixel up the first axis away from the centre.
If ``None``, then no offsets are applied.
as_single_array : `bool`, optional
If ``True``, an ``(n_center, n_offset, n_channels, patch_shape)``
`ndarray`, thus a single numpy array is returned containing each
patch. If ``False``, a `list` of ``n_center * n_offset``
:map:`Image` objects is returned representing each patch.
Returns
-------
patches : `list` or `ndarray`
            Returns the extracted patches. Returns an `ndarray` if
            ``as_single_array=True`` and a `list` of :map:`Image` objects if
            ``as_single_array=False``.
Raises
------
ValueError
If image is not 2D
"""
return self.extract_patches(
self.landmarks[group],
patch_shape=patch_shape,
sample_offsets=sample_offsets,
as_single_array=as_single_array,
)
def set_patches(self, patches, patch_centers, offset=None, offset_index=None):
r"""
Set the values of a group of patches into the correct regions of a copy
of this image. Given an array of patches and a set of patch centers,
the patches' values are copied in the regions of the image that are
centred on the coordinates of the given centers.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
# parse arguments
if self.n_dims != 2:
raise ValueError(
"Only two dimensional patch insertion is " "currently supported."
)
if offset is None:
offset = np.zeros([1, 2], dtype=np.intp)
elif isinstance(offset, tuple) or isinstance(offset, list):
offset = np.asarray([offset])
offset = np.require(offset, dtype=np.intp)
if not offset.shape == (1, 2):
raise ValueError(
"The offset must be a tuple, a list or a "
"numpy.array with shape (1, 2)."
)
if offset_index is None:
offset_index = 0
# if patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(
patches, patch_centers.n_points
)
copy = self.copy()
# set patches
set_patches(patches, copy.pixels, patch_centers.points, offset, offset_index)
return copy
def set_patches_around_landmarks(
self, patches, group=None, offset=None, offset_index=None
):
r"""
Set the values of a group of patches around the landmarks existing in a
copy of this image. Given an array of patches, a group and a label, the
patches' values are copied in the regions of the image that are
centred on the coordinates of corresponding landmarks.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Currently only 2D images are supported.
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that
are returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image`
objects.
        group : `str` or ``None``, optional
The landmark group to use as patch centres.
offset : `list` or `tuple` or ``(1, 2)`` `ndarray` or ``None``, optional
The offset to apply on the patch centers within the image.
If ``None``, then ``(0, 0)`` is used.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the
index of the second dimension from which to sample. If ``None``,
then ``0`` is used.
Raises
------
ValueError
If image is not 2D
ValueError
If offset does not have shape (1, 2)
"""
return self.set_patches(
patches, self.landmarks[group], offset=offset, offset_index=offset_index
)
def warp_to_mask(
self,
template_mask,
transform,
warp_landmarks=True,
order=1,
mode="constant",
cval=0.0,
batch_size=None,
return_transform=False,
):
r"""
Return a copy of this image warped into a different reference space.
Note that warping into a mask is slower than warping into a full image.
If you don't need a non-linear mask, consider :meth:``warp_to_shape``
instead.
Parameters
----------
template_mask : :map:`BooleanImage`
Defines the shape of the result, and what pixels should be sampled.
transform : :map:`Transform`
Transform **from the template space back to this image**.
Defines, for each pixel location on the template, which pixel
location should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
            value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : :map:`MaskedImage`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
if self.n_dims != transform.n_dims:
raise ValueError(
"Trying to warp a {}D image with a {}D transform "
"(they must match)".format(self.n_dims, transform.n_dims)
)
template_points = template_mask.true_indices()
points_to_sample = transform.apply(template_points, batch_size=batch_size)
sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
# set any nan values to 0
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_image = self._build_warp_to_mask(template_mask, sampled)
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def _build_warp_to_mask(self, template_mask, sampled_pixel_values):
r"""
Builds the warped image from the template mask and sampled pixel values.
Overridden for :map:`BooleanImage` as we can't use the usual
:meth:`from_vector_inplace` method. All other :map:`Image` classes
share the :map:`Image` implementation.
Parameters
----------
template_mask : :map:`BooleanImage` or 2D `bool ndarray`
Mask for warping.
sampled_pixel_values : ``(n_true_pixels_in_mask,)`` `ndarray`
Sampled value to rebuild the masked image from.
"""
from menpo.image import MaskedImage
warped_image = MaskedImage.init_blank(
template_mask.shape, n_channels=self.n_channels, mask=template_mask
)
warped_image._from_vector_inplace(sampled_pixel_values.ravel())
return warped_image
def sample(self, points_to_sample, order=1, mode="constant", cval=0.0):
r"""
Sample this image at the given sub-pixel accurate points. The input
PointCloud should have the same number of dimensions as the image e.g.
a 2D PointCloud for a 2D multi-channel image. A numpy array will be
        returned that has the values for every given point across each channel
of the image.
Parameters
----------
points_to_sample : :map:`PointCloud`
Array of points to sample from the image. Should be
`(n_points, n_dims)`
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5].
See warp_to_shape for more information.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
Returns
-------
        sampled_pixels : (`n_channels`, `n_points`) `ndarray`
The interpolated values taken across every channel of the image.
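        Examples
        --------
        A minimal sketch (illustrative points; the channel-first output layout
        is inferred from how ``warp_to_shape`` reshapes the sampled values):
        >>> import numpy as np
        >>> from menpo.image import Image
        >>> from menpo.shape import PointCloud
        >>> img = Image.init_blank((10, 10), n_channels=2, fill=1.0)
        >>> pts = PointCloud(np.array([[2.5, 3.5], [7.0, 7.0], [0.0, 0.0]]))
        >>> img.sample(pts).shape
        (2, 3)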
"""
# The public interface is a PointCloud, but when this is used internally
# a numpy array is passed. So let's just treat the PointCloud as a
# 'special case' and not document the ndarray ability.
if isinstance(points_to_sample, PointCloud):
points_to_sample = points_to_sample.points
return scipy_interpolation(
self.pixels, points_to_sample, order=order, mode=mode, cval=cval
)
def warp_to_shape(
self,
template_shape,
transform,
warp_landmarks=True,
order=1,
mode="constant",
cval=0.0,
batch_size=None,
return_transform=False,
):
"""
Return a copy of this image warped into a different reference space.
Parameters
----------
template_shape : `tuple` or `ndarray`
Defines the shape of the result, and what pixel indices should be
sampled (all of them).
transform : :map:`Transform`
Transform **from the template_shape space back to this image**.
Defines, for each index on template_shape, which pixel location
should be sampled from on this image.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
Used in conjunction with mode ``constant``, the value outside
the image boundaries.
batch_size : `int` or ``None``, optional
This should only be considered for large images. Setting this
value can cause warping to become much slower, particularly for
cached warps such as Piecewise Affine. This size indicates
how many points in the image should be warped at a time, which
keeps memory usage low. If ``None``, no batching is used and all
points are warped at once.
return_transform : `bool`, optional
This argument is for internal use only. If ``True``, then the
:map:`Transform` object is also returned.
Returns
-------
warped_image : `type(self)`
A copy of this image, warped.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
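Examples
--------
A minimal sketch, assuming ``im`` is a 2D image and that
:map:`Translation` lives in ``menpo.transform`` ::
from menpo.transform import Translation
# the transform maps template coordinates back onto this image
shift = Translation([10, 10])
warped = im.warp_to_shape((100, 120), shift)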
"""
template_shape = np.array(template_shape, dtype=int)
if (
isinstance(transform, Homogeneous)
and order in range(2)
and self.n_dims == 2
and cv2_perspective_interpolation is not None
):
# we couldn't do the crop, but OpenCV has an optimised
# interpolation for 2D perspective warps - let's use that
warped_pixels = cv2_perspective_interpolation(
self.pixels,
template_shape,
transform,
order=order,
mode=mode,
cval=cval,
)
else:
template_points = indices_for_image_of_shape(template_shape)
points_to_sample = transform.apply(template_points, batch_size=batch_size)
sampled = self.sample(points_to_sample, order=order, mode=mode, cval=cval)
# set any nan values to 0
# (seems that map_coordinates can produce nan values)
sampled[np.isnan(sampled)] = 0
# build a warped version of the image
warped_pixels = sampled.reshape((self.n_channels,) + tuple(template_shape))
return self._build_warp_to_shape(
warped_pixels, transform, warp_landmarks, return_transform
)
def _build_warp_to_shape(
self, warped_pixels, transform, warp_landmarks, return_transform
):
# factored out common logic from the different paths we can take in
# warp_to_shape. Rebuilds an image post-warp, adjusting landmarks
# as necessary.
warped_image = Image(warped_pixels, copy=False)
# warp landmarks if requested.
if warp_landmarks and self.has_landmarks:
warped_image.landmarks = self.landmarks
transform.pseudoinverse()._apply_inplace(warped_image.landmarks)
if hasattr(self, "path"):
warped_image.path = self.path
# optionally return the transform
if return_transform:
return warped_image, transform
else:
return warped_image
def rescale(
self, scale, round="ceil", order=1, warp_landmarks=True, return_transform=False
):
r"""
Return a copy of this image, rescaled by a given factor.
Landmarks are rescaled appropriately.
Parameters
----------
scale : `float` or `tuple` of `floats`
The scale factor. If a tuple, the scale to apply to each dimension.
If a single `float`, the scale will be applied uniformly across
each dimension.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If less scales than dimensions are provided.
If any scale is less than or equal to 0.
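Examples
--------
A minimal sketch, assuming ``im`` is a 2D image ::
half = im.rescale(0.5)
# or a different scale per dimension
stretched = im.rescale((1.0, 2.0))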
"""
# Pythonic way of converting to list if we are passed a single float
try:
if len(scale) < self.n_dims:
raise ValueError(
"Must provide a scale per dimension."
"{} scales were provided, {} were expected.".format(
len(scale), self.n_dims
)
)
except TypeError: # Thrown when len() is called on a float
scale = [scale] * self.n_dims
# Make sure we have a numpy array
scale = np.asarray(scale)
for s in scale:
if s <= 0:
raise ValueError("Scales must be positive floats.")
transform = NonUniformScale(scale)
# use the scale factor to make the template mask bigger
# while respecting the users rounding preference.
template_shape = round_image_shape(transform.apply(self.shape), round)
# due to image indexing, we can't just apply the pseudoinverse
# transform to achieve the scaling we want though!
# Consider a 3x rescale on a 2x4 image. Looking at each dimension:
# H 2 -> 6 so [0-1] -> [0-5] = 5/1 = 5x
# W 4 -> 12 [0-3] -> [0-11] = 11/3 = 3.67x
# => need to make the correct scale per dimension!
shape = np.array(self.shape, dtype=float)
# scale factors = max_index_after / current_max_index
# (note that max_index = length - 1, as 0 based)
scale_factors = (scale * shape - 1) / (shape - 1)
inverse_transform = NonUniformScale(scale_factors).pseudoinverse()
# for rescaling we enforce that mode is nearest to avoid num. errors
return self.warp_to_shape(
template_shape,
inverse_transform,
warp_landmarks=warp_landmarks,
order=order,
mode="nearest",
return_transform=return_transform,
)
def rescale_to_diagonal(
self, diagonal, round="ceil", warp_landmarks=True, return_transform=False
):
r"""
Return a copy of this image, rescaled so that its diagonal is a
new size.
Parameters
----------
diagonal: `int`
The diagonal size of the new image.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : type(self)
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
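Examples
--------
A minimal sketch, assuming ``im`` is a loaded image ::
im200 = im.rescale_to_diagonal(200)
# im200.diagonal() is now approximately 200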
"""
return self.rescale(
diagonal / self.diagonal(),
round=round,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_to_pointcloud(
self,
pointcloud,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rescaled so that the scale of a
particular group of landmarks matches the scale of the passed
reference pointcloud.
Parameters
----------
pointcloud: :map:`PointCloud`
The reference pointcloud to which the landmarks specified by
``group`` will be scaled to match.
group : `str`, optional
The key of the landmark set that should be used. If ``None``,
and if there is only one set of landmarks, this set will be used.
round: ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
pc = self.landmarks[group]
scale = AlignmentUniformScale(pc, pointcloud).as_vector().copy()
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rescale_landmarks_to_diagonal_range(
self,
diagonal_range,
group=None,
round="ceil",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rescaled so that the diagonal of the
bounding box containing its landmarks matches the specified
``diagonal_range``.
Parameters
----------
diagonal_range: ``(n_dims,)`` `ndarray`
The diagonal range that we want the landmarks of the returned
image to have.
group : `str`, optional
The key of the landmark set that should be used. If ``None``
and if there is only one set of landmarks, this set will be used.
round : ``{ceil, floor, round}``, optional
Rounding function to be applied to floating point shapes.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rescale is also returned.
Returns
-------
rescaled_image : ``type(self)``
A copy of this image, rescaled.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
"""
x, y = self.landmarks[group].range()
scale = diagonal_range / np.sqrt(x ** 2 + y ** 2)
return self.rescale(
scale,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def resize(self, shape, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, resized to a particular shape.
All image information (landmarks, and mask in the case of
:map:`MaskedImage`) is resized appropriately.
Parameters
----------
shape : `tuple`
The new shape to resize to.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the resize is also returned.
Returns
-------
resized_image : ``type(self)``
A copy of this image, resized.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError:
If the number of dimensions of the new shape does not match
the number of dimensions of the image.
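Examples
--------
A minimal sketch, assuming ``im`` is a 2D image ::
resized = im.resize((250, 250))
# resized.shape == (250, 250)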
"""
shape = np.asarray(shape, dtype=float)
if len(shape) != self.n_dims:
raise ValueError(
"Dimensions must match."
"{} dimensions provided, {} were expected.".format(
shape.shape, self.n_dims
)
)
scales = shape / self.shape
# Have to round the shape when scaling to deal with floating point
# errors. For example, if we want (250, 250), we need to ensure that
# we get (250, 250) even if the number we obtain is 250 to some
# floating point inaccuracy.
return self.rescale(
scales,
round="round",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def zoom(self, scale, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, zoomed about the centre point. ``scale``
values greater than 1.0 denote zooming **in** to the image and values
less than 1.0 denote zooming **out** of the image. The size of the
image will not change, if you wish to scale an image, please see
:meth:`rescale`.
Parameters
----------
scale : `float`
``scale > 1.0`` denotes zooming in. Thus the image will appear
larger and areas at the edge of the zoom will be 'cropped' out.
``scale < 1.0`` denotes zooming out. The image will be padded
by the value of ``cval``.
order : `int`, optional
The order of interpolation. The order has to be in the range [0,5]
========= =====================
Order Interpolation
========= =====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= =====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the zooming is also returned.
Returns
-------
zoomed_image : ``type(self)``
A copy of this image, zoomed.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
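Examples
--------
A minimal sketch, assuming ``im`` is a loaded image ::
zoomed_in = im.zoom(1.5)   # same shape, edges cropped away
zoomed_out = im.zoom(0.5)  # same shape, padded at the boundary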
"""
t = scale_about_centre(self, 1.0 / scale)
return self.warp_to_shape(
self.shape,
t,
order=order,
mode="nearest",
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def rotate_ccw_about_centre(
self,
theta,
degrees=True,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, rotated counter-clockwise about its centre.
Note that the `retain_shape` argument defines the shape of the rotated
image. If ``retain_shape=True``, then the shape of the rotated image
will be the same as that of the current image, so some regions will
probably be cropped. If ``retain_shape=False``, then the returned image
has the correct size so that the whole area of the current image is
included.
Parameters
----------
theta : `float`
The angle of rotation about the centre.
degrees : `bool`, optional
If ``True``, `theta` is interpreted in degrees. If ``False``,
``theta`` is interpreted as radians.
retain_shape : `bool`, optional
If ``True``, then the shape of the rotated image will be the same as
that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the rotated image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the rotation is also returned.
Returns
-------
rotated_image : ``type(self)``
The rotated image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
Image rotation is presently only supported on 2D images
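Examples
--------
A minimal sketch, assuming ``im`` is a 2D image ::
rotated = im.rotate_ccw_about_centre(30)
# keep the original shape (cropping the corners)
rotated_cropped = im.rotate_ccw_about_centre(30, retain_shape=True)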
"""
if self.n_dims != 2:
raise ValueError(
"Image rotation is presently only supported on " "2D images"
)
rotation = Rotation.init_from_2d_ccw_angle(theta, degrees=degrees)
return self.transform_about_centre(
rotation,
retain_shape=retain_shape,
mode=mode,
cval=cval,
round=round,
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def transform_about_centre(
self,
transform,
retain_shape=False,
mode="constant",
cval=0.0,
round="round",
order=1,
warp_landmarks=True,
return_transform=False,
):
r"""
Return a copy of this image, transformed about its centre.
Note that the `retain_shape` argument defines the shape of the
transformed image. If ``retain_shape=True``, then the shape of the
transformed image will be the same as that of the current image, so some
regions will probably be cropped. If ``retain_shape=False``, then the
returned image has the correct size so that the whole area of the
current image is included.
.. note::
This method will not work for transforms that result in a transform
chain as :map:`TransformChain` is not invertible.
.. note::
Be careful when defining transforms for warping images. All pixel
locations must fall within a valid range as expected by the
transform. Therefore, your transformation must accept 'negative'
pixel locations as the pixel locations provided to your transform
will have the object centre subtracted from them.
Parameters
----------
transform : :map:`ComposableTransform` and :map:`VInvertible` type
A composable transform. ``pseudoinverse`` will be invoked on the
resulting transform so it must implement a valid inverse.
retain_shape : `bool`, optional
If ``True``, then the shape of the transformed image will be the same as
that of the current image, so some regions will probably be cropped.
If ``False``, then the returned image has the correct size so that
the whole area of the current image is included.
mode : ``{constant, nearest, reflect, wrap}``, optional
Points outside the boundaries of the input are filled according
to the given mode.
cval : `float`, optional
The value to be set outside the transformed image boundaries.
round : ``{'ceil', 'floor', 'round'}``, optional
Rounding function to be applied to floating point shapes. This is
only used in case ``retain_shape=True``.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``. This is only used in case ``retain_shape=True``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as ``self``, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the transformation is also returned.
Returns
-------
transformed_image : ``type(self)``
The transformed image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Examples
--------
This is an example for rotating an image about its center. Let's
first load an image, create the rotation transform and then apply it ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Rotation
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create rotation transform
rot_tr = Rotation.init_from_2d_ccw_angle(45)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render rotated image
plt.subplot(132)
im.transform_about_centre(rot_tr).view_landmarks()
plt.title('Rotated')
# Render rotated image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(rot_tr, retain_shape=True).view_landmarks()
plt.title('Rotated (Retain original shape)')
Similarly, in order to apply a shear transform ::
import matplotlib.pyplot as plt
import menpo.io as mio
from menpo.transform import Affine
# Load image
im = mio.import_builtin_asset.lenna_png()
# Create shearing transform
shear_tr = Affine.init_from_2d_shear(25, 10)
# Render original image
plt.subplot(131)
im.view_landmarks()
plt.title('Original')
# Render sheared image
plt.subplot(132)
im.transform_about_centre(shear_tr).view_landmarks()
plt.title('Sheared')
# Render sheared image that has shape equal as original image
plt.subplot(133)
im.transform_about_centre(shear_tr,
retain_shape=True).view_landmarks()
plt.title('Sheared (Retain original shape)')
"""
if retain_shape:
shape = self.shape
applied_transform = transform_about_centre(self, transform)
else:
# Get image's bounding box coordinates
original_bbox = bounding_box((0, 0), np.array(self.shape) - 1)
# Translate to origin and apply transform
trans = Translation(-self.centre(), skip_checks=True).compose_before(
transform
)
transformed_bbox = trans.apply(original_bbox)
# Create new translation so that min bbox values go to 0
t = Translation(-transformed_bbox.bounds()[0])
applied_transform = trans.compose_before(t)
transformed_bbox = trans.apply(original_bbox)
# Output image's shape is the range of the transformed bounding box
# while respecting the users rounding preference.
shape = round_image_shape(transformed_bbox.range() + 1, round)
# Warp image
return self.warp_to_shape(
shape,
applied_transform.pseudoinverse(),
order=order,
warp_landmarks=warp_landmarks,
mode=mode,
cval=cval,
return_transform=return_transform,
)
def mirror(self, axis=1, order=1, warp_landmarks=True, return_transform=False):
r"""
Return a copy of this image, mirrored/flipped about a certain axis.
Parameters
----------
axis : `int`, optional
The axis about which to mirror the image.
order : `int`, optional
The order of interpolation. The order has to be in the range
``[0,5]``.
========= ====================
Order Interpolation
========= ====================
0 Nearest-neighbor
1 Bi-linear *(default)*
2 Bi-quadratic
3 Bi-cubic
4 Bi-quartic
5 Bi-quintic
========= ====================
warp_landmarks : `bool`, optional
If ``True``, result will have the same landmark dictionary
as self, but with each landmark updated to the warped position.
return_transform : `bool`, optional
If ``True``, then the :map:`Transform` object that was used to
perform the mirroring is also returned.
Returns
-------
mirrored_image : ``type(self)``
The mirrored image.
transform : :map:`Transform`
The transform that was used. It only applies if
`return_transform` is ``True``.
Raises
------
ValueError
axis cannot be negative
ValueError
axis={} but the image has {} dimensions
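Examples
--------
A minimal sketch, assuming ``im`` is a 2D image ::
mirrored = im.mirror()          # flip about axis 1 (the default)
mirrored_0 = im.mirror(axis=0)  # flip about axis 0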
"""
# Check axis argument
if axis < 0:
raise ValueError("axis cannot be negative")
elif axis >= self.n_dims:
raise ValueError(
"axis={} but the image has {} " "dimensions".format(axis, self.n_dims)
)
# Create transform that includes ...
# ... flipping about the selected axis ...
rot_matrix = np.eye(self.n_dims)
rot_matrix[axis, axis] = -1
# ... and translating back to the image's bbox
tr_matrix = np.zeros(self.n_dims)
tr_matrix[axis] = self.shape[axis] - 1
# Create transform object
trans = Rotation(rot_matrix, skip_checks=True).compose_before(
Translation(tr_matrix, skip_checks=True)
)
# Warp image
return self.warp_to_shape(
self.shape,
trans.pseudoinverse(),
mode="nearest",
order=order,
warp_landmarks=warp_landmarks,
return_transform=return_transform,
)
def pyramid(self, n_levels=3, downscale=2):
r"""
Return a rescaled pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
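Examples
--------
A minimal sketch, assuming ``im`` is a loaded image ::
levels = list(im.pyramid(n_levels=3, downscale=2))
# levels[0] is a copy of im; each later level is roughly half the
# size of the previous one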
"""
image = self.copy()
yield image
for _ in range(n_levels - 1):
image = image.rescale(1.0 / downscale)
yield image
def gaussian_pyramid(self, n_levels=3, downscale=2, sigma=None):
r"""
Return the gaussian pyramid of this image. The first image of the
pyramid will be a copy of the original, unmodified, image, and counts
as level 1.
Parameters
----------
n_levels : `int`, optional
Total number of levels in the pyramid, including the original
unmodified image
downscale : `float`, optional
Downscale factor.
sigma : `float`, optional
Sigma for gaussian filter. Default is ``downscale / 3.`` which
corresponds to a filter mask twice the size of the scale factor
that covers more than 99% of the gaussian distribution.
Yields
------
image_pyramid: `generator`
Generator yielding pyramid layers as :map:`Image` objects.
"""
from menpo.feature import gaussian_filter
if sigma is None:
sigma = downscale / 3.0
image = self.copy()
yield image
for level in range(n_levels - 1):
image = gaussian_filter(image, sigma).rescale(1.0 / downscale)
yield image
def as_greyscale(self, mode="luminosity", channel=None):
r"""
Returns a greyscale version of the image. If the image does *not*
represent a 2D RGB image, then the ``luminosity`` mode will fail.
Parameters
----------
mode : ``{average, luminosity, channel}``, optional
============== =====================================================
mode Greyscale Algorithm
============== =====================================================
average Equal average of all channels
luminosity Calculates the luminance using the CCIR 601 formula:
| .. math:: Y' = 0.2989 R' + 0.5870 G' + 0.1140 B'
channel A specific channel is chosen as the intensity value.
============== =====================================================
channel: `int`, optional
The channel to be taken. Only used if mode is ``channel``.
Returns
-------
greyscale_image : :map:`MaskedImage`
A copy of this image in greyscale.
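Examples
--------
A minimal sketch, assuming ``im`` is a 2D RGB image ::
grey = im.as_greyscale()                          # luminosity (default)
red = im.as_greyscale(mode='channel', channel=0)  # single channel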
"""
greyscale = self.copy()
if mode == "luminosity":
if self.n_dims != 2:
raise ValueError(
"The 'luminosity' mode only works on 2D RGB"
"images. {} dimensions found, "
"2 expected.".format(self.n_dims)
)
elif self.n_channels != 3:
raise ValueError(
"The 'luminosity' mode only works on RGB"
"images. {} channels found, "
"3 expected.".format(self.n_channels)
)
# Only compute the coefficients once.
global _greyscale_luminosity_coef
if _greyscale_luminosity_coef is None:
_greyscale_luminosity_coef = np.linalg.inv(
np.array(
[
[1.0, 0.956, 0.621],
[1.0, -0.272, -0.647],
[1.0, -1.106, 1.703],
]
)
)[0, :]
# Compute greyscale via dot product
pixels = np.dot(_greyscale_luminosity_coef, greyscale.pixels.reshape(3, -1))
# Reshape image back to original shape (with 1 channel)
pixels = pixels.reshape(greyscale.shape)
elif mode == "average":
pixels = np.mean(greyscale.pixels, axis=0)
elif mode == "channel":
if channel is None:
raise ValueError(
"For the 'channel' mode you have to provide" " a channel index"
)
pixels = greyscale.pixels[channel]
else:
raise ValueError(
"Unknown mode {} - expected 'luminosity', "
"'average' or 'channel'.".format(mode)
)
# Set new pixels - ensure channel axis and maintain
greyscale.pixels = pixels[None, ...].astype(greyscale.pixels.dtype, copy=False)
return greyscale
def as_PILImage(self, out_dtype=np.uint8):
r"""
Return a PIL copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
pil_image : `PILImage`
PIL copy of image
Raises
------
ValueError
If image is not 2D and has 1 channel or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 is supported.
"""
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
pixels = denormalize_pixels_range(pixels, out_dtype)
return PILImage.fromarray(pixels)
def as_imageio(self, out_dtype=np.uint8):
r"""
Return an Imageio copy of the image scaled and cast to the correct
values for the provided ``out_dtype``.
Image must only have 1 or 3 channels and be 2 dimensional.
Non `uint8` floating point images must be in the range ``[0, 1]`` to be
converted.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
imageio_image : `ndarray`
Imageio image (which is just a numpy ndarray with the channels
as the last axis).
Raises
------
ValueError
If image is not 2D and has 1 channel or 3 channels.
ValueError
If pixels data type is `float32` or `float64` and the pixel
range is outside of ``[0, 1]``
ValueError
If the output dtype is unsupported. Currently uint8 and uint16
are supported.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .pixels_with_channels_at_back instead.",
MenpoDeprecationWarning,
)
if self.n_dims != 2 or (self.n_channels != 1 and self.n_channels != 3):
raise ValueError(
"Can only convert greyscale or RGB 2D images. "
"Received a {} channel {}D image.".format(self.n_channels, self.n_dims)
)
# Slice off the channel for greyscale images
if self.n_channels == 1:
pixels = self.pixels[0]
else:
pixels = channels_to_back(self.pixels)
return denormalize_pixels_range(pixels, out_dtype)
def pixels_range(self):
r"""
The range of the pixel values (min and max pixel values).
Returns
-------
min_max : ``(dtype, dtype)``
The minimum and maximum value of the pixels array.
"""
return self.pixels.min(), self.pixels.max()
def rolled_channels(self):
r"""
Deprecated - please use the equivalent ``pixels_with_channels_at_back`` method.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .pixels_with_channels_at_back() instead.",
MenpoDeprecationWarning,
)
return self.pixels_with_channels_at_back()
def pixels_with_channels_at_back(self, out_dtype=None):
r"""
Returns the pixels matrix, with the channels rolled to the back axis.
This may be required for interacting with external code bases that
require images to have channels as the last axis, rather than the
Menpo convention of channels as the first axis.
If this image is single channel, the final axis is dropped.
Parameters
----------
out_dtype : `np.dtype`, optional
The dtype the output array should be.
Returns
-------
rolled_channels : `ndarray`
Pixels with channels as the back (last) axis. If single channel,
the last axis will be dropped.
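Examples
--------
A minimal sketch, assuming ``im`` is an RGB image ::
arr = im.pixels_with_channels_at_back()
# arr has shape im.shape + (3,), e.g. for use with matplotlib's imshow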
"""
p = channels_to_back(self.pixels)
if out_dtype is not None:
p = denormalize_pixels_range(p, out_dtype=out_dtype)
return np.squeeze(p)
def __str__(self):
return "{} {}D Image with {} channel{}".format(
self._str_shape(), self.n_dims, self.n_channels, "s" * (self.n_channels > 1)
)
def has_landmarks_outside_bounds(self):
"""
Indicates whether there are landmarks located outside the image bounds.
:type: `bool`
"""
if self.has_landmarks:
for l_group in self.landmarks:
pc = self.landmarks[l_group].points
if np.any(np.logical_or(self.shape - pc < 1, pc < 0)):
return True
return False
def constrain_landmarks_to_bounds(self):
r"""
Deprecated - please use the equivalent ``constrain_to_bounds`` method
now on PointCloud, in conjunction with the new Image ``bounds()``
method. For example:
>>> im.constrain_landmarks_to_bounds() # Equivalent to below
>>> im.landmarks['test'] = im.landmarks['test'].constrain_to_bounds(im.bounds())
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .constrain_to_bounds() instead (on PointCloud).",
MenpoDeprecationWarning,
)
for l_group in self.landmarks:
l = self.landmarks[l_group]
for k in range(l.points.shape[1]):
tmp = l.points[:, k]
tmp[tmp < 0] = 0
tmp[tmp > self.shape[k] - 1] = self.shape[k] - 1
l.points[:, k] = tmp
self.landmarks[l_group] = l
def normalize_std(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its
pixel values have zero mean and unit variance.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_std() instead (features package).",
MenpoDeprecationWarning,
)
return self._normalize(np.std, mode=mode)
def normalize_norm(self, mode="all", **kwargs):
r"""
Returns a copy of this image normalized such that its pixel values
have zero mean and its norm equals 1.
Parameters
----------
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
unit norm.
Returns
-------
image : ``type(self)``
A copy of this image, normalized.
"""
warn(
"This method is no longer supported and will be removed in a "
"future version of Menpo. "
"Use .normalize_norm() instead (features package).",
MenpoDeprecationWarning,
)
def scale_func(pixels, axis=None):
return np.linalg.norm(pixels, axis=axis, **kwargs)
return self._normalize(scale_func, mode=mode)
def _normalize(self, scale_func, mode="all"):
from menpo.feature import normalize
return normalize(self, scale_func=scale_func, mode=mode)
def rescale_pixels(self, minimum, maximum, per_channel=True):
r"""A copy of this image with pixels linearly rescaled to fit a range.
Note that the only pixels that will be considered and rescaled are those
that feature in the vectorized form of this image. If you want to use
this routine on all the pixels in a :map:`MaskedImage`, consider
using `as_unmasked()` prior to this call.
Parameters
----------
minimum: `float`
The minimal value of the rescaled pixels
maximum: `float`
The maximal value of the rescaled pixels
per_channel: `boolean`, optional
If ``True``, each channel will be rescaled independently. If
``False``, the scaling will be over all channels.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
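Examples
--------
A minimal sketch, assuming ``im`` is a loaded image ::
unit = im.rescale_pixels(0.0, 1.0)
# every channel of unit now spans exactly [0, 1]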
"""
v = self.as_vector(keep_channels=True).T
if per_channel:
min_, max_ = v.min(axis=0), v.max(axis=0)
else:
min_, max_ = v.min(), v.max()
sf = ((maximum - minimum) * 1.0) / (max_ - min_)
v_new = ((v - min_) * sf) + minimum
return self.from_vector(v_new.T.ravel())
def clip_pixels(self, minimum=None, maximum=None):
r"""A copy of this image with pixels linearly clipped to fit a range.
Parameters
----------
minimum: `float`, optional
The minimal value of the clipped pixels. If None is provided, the
default value will be 0.
maximum: `float`, optional
The maximal value of the clipped pixels. If None is provided, the
default value will depend on the dtype.
Returns
-------
rescaled_image: ``type(self)``
A copy of this image with pixels linearly rescaled to fit in the
range provided.
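Examples
--------
A minimal sketch, assuming ``im`` has floating point pixels ::
clipped = im.clip_pixels()          # clips to [0, 1] for float images
tighter = im.clip_pixels(0.2, 0.8)  # explicit bounds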
"""
if minimum is None:
minimum = 0
if maximum is None:
dtype = self.pixels.dtype
if dtype == np.uint8:
maximum = 255
elif dtype == np.uint16:
maximum = 65535
elif dtype in [np.float32, np.float64]:
maximum = 1.0
else:
m1 = "Could not recognise the dtype ({}) to set the maximum."
raise ValueError(m1.format(dtype))
copy = self.copy()
copy.pixels = copy.pixels.clip(min=minimum, max=maximum)
return copy
def rasterize_landmarks(
self,
group=None,
render_lines=True,
line_style="-",
line_colour="b",
line_width=1,
render_markers=True,
marker_style="o",
marker_size=1,
marker_face_colour="b",
marker_edge_colour="b",
marker_edge_width=1,
backend="matplotlib",
):
r"""
This method provides the ability to rasterize 2D landmarks onto the
image. The returned image has the specified landmark groups rasterized
onto the image - which is useful for things like creating result
examples or rendering videos with annotations.
Since multiple landmark groups can be specified, all arguments can take
lists of parameters that map to the provided groups list. Therefore, the
parameters must be lists of the correct length or a single parameter to
apply to every landmark group.
Multiple backends are provided, all with different strengths. The
'pillow' backend is very fast, but not very flexible. The `matplotlib`
backend should be feature compatible with other Menpo rendering methods,
but is much slower due to the overhead of creating a figure to render
into.
Parameters
----------
group : `str` or `list` of `str`, optional
The landmark group key, or a list of keys.
render_lines : `bool`, optional
If ``True``, and the provided landmark group is a
:map:`PointDirectedGraph`, the edges are rendered.
line_style : `str`, optional
The style of the edge line. Not all backends support this argument.
line_colour : `str` or `tuple`, optional
A Matplotlib style colour or a backend dependant colour.
line_width : `int`, optional
The width of the line to rasterize.
render_markers : `bool`, optional
If ``True``, render markers at the coordinates of each landmark.
marker_style : `str`, optional
A Matplotlib marker style. Not all backends support all marker
styles.
marker_size : `int`, optional
The size of the marker - different backends use different scale
spaces so consistent output may be difficult.
marker_face_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_colour : `str`, optional
A Matplotlib style colour or a backend dependant colour.
marker_edge_width : `int`, optional
The width of the marker edge. Not all backends support this.
backend : {'matplotlib', 'pillow'}, optional
The backend to use.
Returns
-------
rasterized_image : :map:`Image`
The image with the landmarks rasterized directly into the pixels.
Raises
------
ValueError
Only 2D images are supported.
ValueError
Only RGB (3-channel) or Greyscale (1-channel) images are supported.
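Examples
--------
A minimal sketch, assuming ``im`` is a 2D RGB image carrying a landmark
group named ``'PTS'`` (the group name here is only illustrative) ::
rendered = im.rasterize_landmarks(group='PTS', backend='pillow')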
"""
from .rasterize import rasterize_landmarks_2d
return rasterize_landmarks_2d(
self,
group=group,
render_lines=render_lines,
line_style=line_style,
line_colour=line_colour,
line_width=line_width,
render_markers=render_markers,
marker_style=marker_style,
marker_size=marker_size,
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=marker_edge_width,
backend=backend,
)
def round_image_shape(shape, round):
if round not in ["ceil", "round", "floor"]:
raise ValueError("round must be either ceil, round or floor")
# Apply the requested rounding function and return the shape as an int tuple
return tuple(getattr(np, round)(shape).astype(int))
def _convert_patches_list_to_single_array(patches_list, n_center):
r"""
Converts patches from a `list` of :map:`Image` objects to a single `ndarray`
with shape ``(n_center, n_offset, self.n_channels, patch_shape)``.
Note that these two are the formats returned by the `extract_patches()`
and `extract_patches_around_landmarks()` methods of :map:`Image` class.
Parameters
----------
patches_list : `list` of `n_center * n_offset` :map:`Image` objects
A `list` that contains all the patches as :map:`Image` objects.
n_center : `int`
The number of centers from which the patches are extracted.
Returns
-------
patches_array : `ndarray` ``(n_center, n_offset, n_channels, patch_shape)``
The numpy array that contains all the patches.
"""
n_offsets = int(len(patches_list) / n_center)
n_channels = patches_list[0].n_channels
height = patches_list[0].height
width = patches_list[0].width
patches_array = np.empty(
(n_center, n_offsets, n_channels, height, width),
dtype=patches_list[0].pixels.dtype,
)
total_index = 0
for p in range(n_center):
for o in range(n_offsets):
patches_array[p, o, ...] = patches_list[total_index].pixels
total_index += 1
return patches_array
def _create_patches_image(
patches, patch_centers, patches_indices=None, offset_index=None, background="black"
):
r"""
Creates an :map:`Image` object in which the patches are located on the
correct regions based on the centers. Thus, the image is a block-sparse
matrix. It also has attached a `patch_centers` :map:`PointCloud`
object with the centers that correspond to the patches that the user
selected to set.
The patches argument can have any of the two formats that are returned
from the `extract_patches()` and `extract_patches_around_landmarks()`
methods of the :map:`Image` class. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` :map:`Image` objects
Parameters
----------
patches : `ndarray` or `list`
The values of the patches. It can have any of the two formats that are
returned from the `extract_patches()` and
`extract_patches_around_landmarks()` methods. Specifically, it can
either be an ``(n_center, n_offset, self.n_channels, patch_shape)``
`ndarray` or a `list` of ``n_center * n_offset`` :map:`Image` objects.
patch_centers : :map:`PointCloud`
The centers to set the patches around.
patches_indices : `int` or `list` of `int` or ``None``, optional
Defines the patches that will be set (copied) to the image. If ``None``,
then all the patches are copied.
offset_index : `int` or ``None``, optional
The offset index within the provided `patches` argument, thus the index
of the second dimension from which to sample. If ``None``, then ``0`` is
used.
background : ``{'black', 'white'}``, optional
If ``'black'``, then the background is set equal to the minimum value
of `patches`. If ``'white'``, then the background is set equal to the
maximum value of `patches`.
Returns
-------
patches_image : :map:`Image`
The output patches image object.
Raises
------
ValueError
Background must be either ``'black'`` or ``'white'``.
"""
# If patches is a list, convert it to array
if isinstance(patches, list):
patches = _convert_patches_list_to_single_array(patches, patch_centers.n_points)
# Parse inputs
if offset_index is None:
offset_index = 0
if patches_indices is None:
patches_indices = np.arange(patches.shape[0])
elif not isinstance(patches_indices, Iterable):
patches_indices = [patches_indices]
# Compute patches image's shape
n_channels = patches.shape[2]
patch_shape0 = patches.shape[3]
patch_shape1 = patches.shape[4]
top, left = np.min(patch_centers.points, 0)
bottom, right = np.max(patch_centers.points, 0)
min_0 = np.floor(top - patch_shape0)
min_1 = np.floor(left - patch_shape1)
max_0 = np.ceil(bottom + patch_shape0)
max_1 = np.ceil(right + patch_shape1)
height = max_0 - min_0 + 1
width = max_1 - min_1 + 1
# Translate the patch centers to fit in the new image
new_patch_centers = patch_centers.copy()
new_patch_centers.points = patch_centers.points - np.array([[min_0, min_1]])
# Create new image with the correct background values
if background == "black":
patches_image = Image.init_blank(
(height, width),
n_channels,
fill=np.min(patches[patches_indices]),
dtype=patches.dtype,
)
elif background == "white":
patches_image = Image.init_blank(
(height, width),
n_channels,
fill=np.max(patches[patches_indices]),
dtype=patches.dtype,
)
else:
raise ValueError("Background must be either " "black" " or " "white" ".")
# If there was no slicing on the patches, then attach the original patch
# centers. Otherwise, attach the sliced ones.
if set(patches_indices) == set(range(patches.shape[0])):
patches_image.landmarks["patch_centers"] = new_patch_centers
else:
tmp_centers = PointCloud(new_patch_centers.points[patches_indices])
patches_image.landmarks["patch_centers"] = tmp_centers
# Set the patches
return patches_image.set_patches_around_landmarks(
patches[patches_indices], group="patch_centers", offset_index=offset_index
)
| bsd-3-clause |
myyc/kleptes | kleptes/utils.py | 1 | 1213 | import re
import fnmatch
import pandas as pd
# three days
EXPIRE = 259200
def get_re(pattern):
if type(pattern) == str:
return re.compile(fnmatch.translate(pattern), flags=re.IGNORECASE)
elif type(pattern) == list:
return re.compile("|".join(fnmatch.translate(pattern)),
flags=re.IGNORECASE)
else:
raise ValueError("'pattern' must either be a string or a list "
"(or None)")
class SearchableDataFrame(pd.DataFrame):
"""A DataFrame that implements a search method via __call__"""
def __call__(self, pattern=None, cols=None):
if pattern is None:
return self
p = get_re(pattern)
if cols not in list(self.columns):
if cols is None:
_df = self.select_dtypes(["object"])
elif type(cols) == list:
_df = self[cols]
else:
raise ValueError("'cols' must be either a 'list', 'None' or "
"the name of a column.")
s = _df.apply(lambda x: x.str.match(p)).sum(axis=1).astype("bool")
else:
s = self[cols].str.match(p)
return self[s]
| bsd-3-clause |
jkibele/OpticalRS | OpticalRS/DepthEstimator.py | 1 | 9986 | # -*- coding: utf-8 -*-
"""
DepthEstimator
==============
Code for handling required data and producing depth estimates from multispectral
satellite imagery. KNN (Kibele and Shears, In Review) and linear methods
(Lyzenga et al., 2006) are currently supported.
References
----------
Kibele, J., Shears, N.T., In Press. Non-parametric empirical depth regression
for bathymetric mapping in coastal waters. IEEE Journal of Selected Topics in
Applied Earth Observations and Remote Sensing.
Lyzenga, D.R., Malinas, N.P., Tanis, F.J., 2006. Multispectral bathymetry using
a simple physically based algorithm. Geoscience and Remote Sensing, IEEE
Transactions on 44, 2251–2259. doi:10.1109/TGRS.2006.872909
"""
from RasterDS import RasterDS
from ArrayUtils import mask3D_with_2D, equalize_array_masks, equalize_band_masks
import KNNDepth
from Lyzenga2006 import dark_pixel_array, fit_and_predict, deep_water_means
import numpy as np
from sklearn.cross_validation import train_test_split
class DepthEstimator(object):
"""
Once initialized, this object can do a bunch of tasks related to depth
estimation.
Parameters
----------
img : OpticalRS.RasterDS, numpy image array, or file path to image
This is the multispectral image for which depth is to be estimated.
known_depths : OpticalRS.RasterDS, numpy image array, or file path to image
A raster of known depths.
Notes
-----
Assumptions:
- size of known_depths array = size of a single band of img
- unmasked known_depths pixels are a subset of unmasked img pixels
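Examples
--------
A minimal sketch (the file paths below are placeholders) ::
de = DepthEstimator('multispectral_image.tif', 'known_depths.tif')
train_de, test_de = de.training_split(train_size=0.5)
depth_map = de.knn_depth_estimation(k=5)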
"""
def __init__(self,img,known_depths):
self.img_original = img
self.imrds = None
try:
self.imlevel = img.ndim
except AttributeError:
if type(img).__name__ == 'RasterDS':
self.imlevel = 4
self.imrds = img
else:
# next line should raise exception if `img` can't make RasterDS
self.imrds = RasterDS(img)
self.imlevel = 4
self.known_original = known_depths
if type(self.known_original).__name__=='RasterDS':
self.kdrds = known_depths
elif np.ma.isMaskedArray(self.known_original):
self.kdrds = None
else:
self.kdrds = RasterDS(self.known_original)
self.known_depth_arr = self.__known_depth_arr()
self.imarr = self.__imarr()
self.__mask_depths_with_no_image()
self.nbands = self.imarr_flat.shape[-1]
# Check that the numbers of pixels are compatible
impix = self.imarr_flat.size / self.nbands
dpix = self.known_depth_arr_flat.size
errstr = "{} image pixels and {} depth pixels. Need the same number of pixels."
assert impix == dpix, errstr.format(impix,dpix)
def __imarr(self):
"""
Return 3D (R,C,nBands) image array if possible. If only 2D
(pixels,nBands) array is available, return `None`. Returned array will
be np.ma.MaskedArray type even if no pixels are masked.
"""
try:
self.imarr
except AttributeError:
if self.imlevel == 4:
arr = np.ma.masked_invalid(self.imrds.band_array)
self.imarr = arr
elif self.imlevel == 3:
arr = np.ma.masked_invalid(self.img_original)
self.imarr = arr
else: # level 2
self.imarr = None
return self.imarr
def __known_depth_arr(self):
"""
Return a 2D (R,C) masked array of known depths if possible. If flat
array was handed in instead, return `None`.
"""
try:
self.known_depth_arr
except AttributeError:
if self.kdrds:
arr = self.kdrds.band_array.squeeze()
self.known_depth_arr = np.ma.masked_invalid(arr)
elif isinstance(self.known_original,np.ndarray):
arr = self.known_original.squeeze()
if arr.ndim > 1:
self.known_depth_arr = np.ma.masked_invalid(arr)
else:
self.known_depth_arr = None
else:
# I can't think of a case where we'd get here but...
self.known_depth_arr = None
return self.known_depth_arr
def __mask_depths_with_no_image(self):
"""
Mask depths that have no corresponding pixels. Only works for non-flat
arrays.
"""
if np.ma.is_masked(self.imarr) and np.ma.is_masked(self.known_depth_arr):
# I'm assuming all image bands have the same mask. ...they should.
immask = self.imarr[...,0].mask
self.known_depth_arr = np.ma.masked_where(immask, self.known_depth_arr)
@property
def known_depth_arr_flat(self):
if np.ma.isMA(self.known_depth_arr):
return self.known_depth_arr.ravel()
else:
return self.known_original
@property
def imarr_flat(self):
"""
Return all the image pixels in (pixels,bands) shape.
"""
if self.imlevel > 2:
return self.imarr.reshape(-1,self.imarr.shape[-1])
else:
return self.img_original
@property
def imarr_compressed(self):
"""
Return unmasked pixels in (pixels,bands) shape.
"""
return self.imarr_flat.compressed().reshape(-1,self.nbands)
@property
def known_imarr(self):
"""
Return 3D (R,C,nBands) image array with pixels masked where no known
depth is available. If no 3D image array is available, return `None`.
"""
if np.ma.isMA(self.imarr) and np.ma.isMA(self.known_depth_arr):
return mask3D_with_2D(self.imarr,self.known_depth_arr.mask)
else:
return None
@property
def known_imarr_flat(self):
"""
The flattend (pix,bands) image array with all pixels of unknown depth
masked.
"""
if np.ma.isMA(self.known_imarr):
return self.known_imarr.reshape(-1,self.nbands)
else:
mask1b = self.known_depth_arr_flat.mask
mask = np.repeat(np.atleast_2d(mask1b).T,self.nbands,1)
return np.ma.masked_where(mask,self.imarr_flat)
def training_split(self,train_size=0.4,random_state=0):
"""
Split your `DepthEstimator` into training and test subsets. This is a
wrapper on the scikit-learn `cross_validation.train_test_split`. More
info: http://scikit-learn.org/stable/modules/generated/sklearn.cross_validation.train_test_split.html
Parameters
----------
train_size : float, int, or None (default is 0.4)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If int,
represents the absolute number of train samples. If None, the value
is automatically set to 0.75.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Returns
-------
(train_DE,test_DE) : tuple of DepthEstimators
Two `DepthEstimator` objects made with compressed and flattened
arrays. Suitable for training and/or testing depth estimators but
not for producing images.
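Examples
--------
A minimal sketch, assuming ``de`` is an existing ``DepthEstimator`` ::
train_de, test_de = de.training_split(train_size=0.6, random_state=1)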
"""
im_train, im_test, dep_train, dep_test = train_test_split(
self.known_imarr_flat, self.known_depth_arr_flat,
train_size=train_size,random_state=random_state)
return DepthEstimator(im_train,dep_train),DepthEstimator(im_test,dep_test)
def knn_depth_model(self,k=5,weights='uniform',metric='minkowski',
n_jobs=4, **kwargs):
"""
Return a trained KNN depth model. See `OpticalRS.KNNDepth.train_model`
for more information. This is really just a wrapper over the
KNeighborsRegressor model in scikit-learn.
"""
return KNNDepth.train_model(self.known_imarr_flat.compressed().reshape(-1,self.nbands),
self.known_depth_arr_flat.compressed(),
k=k, weights=weights,
metric=metric, n_jobs=n_jobs, **kwargs)
def knn_depth_estimation(self,k=5,weights='uniform',
metric='minkowski',n_jobs=4, **kwargs):
"""
Train a KNN regression model with `known_depths` and corresponding
pixels from `img`. Then use that model to predict depths for all pixels
in `img`. Return a single band array of estimated depths.
"""
out = self.imarr[...,0].copy()
knnmodel = self.knn_depth_model(k=k, weights=weights,
metric=metric, n_jobs=n_jobs, **kwargs)
ests = knnmodel.predict(self.imarr_compressed)
out[~out.mask] = ests
return out
def lyzenga_depth_estimation(self, Rinf=None, bands=None, n_std=0,
n_jobs=4):
"""
This will implement the linear depth estimation method described in
Lyzenga et al. 2006. This doc string needs a bit more detail but I don't
have time right now. Check `OpticalRS.Lyzenga2006` for more detail. This
method just wraps some of the code from that module to make it easier to
run.
"""
if bands is None:
bands = self.nbands
if Rinf is None:
Rinf = deep_water_means(self.imarr[...,:bands], n_std=n_std)
X = np.ma.log(self.imarr[...,:bands] - Rinf)
X = equalize_band_masks(X)
# need to re-equalize, might have lost pixels in log transform
Xtrain, deparr = equalize_array_masks(X, self.known_depth_arr)
return fit_and_predict(Xtrain, deparr, X, n_jobs=n_jobs)
| bsd-3-clause |
Eric89GXL/mne-python | mne/externals/tqdm/_tqdm/std.py | 14 | 55471 | """
Customisable progressbar decorator for iterators.
Includes a default (x)range iterator printing to stderr.
Usage:
>>> from tqdm import trange[, tqdm]
>>> for i in trange(10): #same as: for i in tqdm(xrange(10))
... ...
"""
from __future__ import absolute_import, division
# compatibility functions and utilities
from .utils import _supports_unicode, _environ_cols_wrapper, _range, _unich, \
_term_move_up, _unicode, WeakSet, _basestring, _OrderedDict, _text_width, \
Comparable, RE_ANSI, _is_ascii, FormatReplace, \
SimpleTextIOWrapper, CallbackIOWrapper
from ._monitor import TMonitor
# native libraries
from contextlib import contextmanager
import sys
from numbers import Number
from time import time
# For parallelism safety
import threading as th
from warnings import warn
__author__ = {"github.com/": ["noamraph", "obiwanus", "kmike", "hadim",
"casperdcl", "lrq3000"]}
__all__ = ['tqdm', 'trange',
'TqdmTypeError', 'TqdmKeyError', 'TqdmWarning',
'TqdmExperimentalWarning', 'TqdmDeprecationWarning',
'TqdmMonitorWarning']
class TqdmTypeError(TypeError):
pass
class TqdmKeyError(KeyError):
pass
class TqdmWarning(Warning):
"""base class for all tqdm warnings.
Used for non-external-code-breaking errors, such as garbled printing.
"""
def __init__(self, msg, fp_write=None, *a, **k):
if fp_write is not None:
fp_write("\n" + self.__class__.__name__ + ": " +
str(msg).rstrip() + '\n')
else:
super(TqdmWarning, self).__init__(msg, *a, **k)
class TqdmExperimentalWarning(TqdmWarning, FutureWarning):
"""beta feature, unstable API and behaviour"""
pass
class TqdmDeprecationWarning(TqdmWarning, DeprecationWarning):
# not suppressed if raised
pass
class TqdmMonitorWarning(TqdmWarning, RuntimeWarning):
"""tqdm monitor errors which do not affect external functionality"""
pass
class TqdmDefaultWriteLock(object):
"""
Provide a default write lock for thread and multiprocessing safety.
Works only on platforms supporting `fork` (so Windows is excluded).
You must initialise a `tqdm` or `TqdmDefaultWriteLock` instance
before forking in order for the write lock to work.
On Windows, you need to supply the lock from the parent to the children as
an argument to joblib or the parallelism lib you use.
"""
def __init__(self):
# Create global parallelism locks to avoid racing issues with parallel
# bars works only if fork available (Linux/MacOSX, but not Windows)
self.create_mp_lock()
self.create_th_lock()
cls = type(self)
self.locks = [lk for lk in [cls.mp_lock, cls.th_lock] if lk is not None]
def acquire(self, *a, **k):
for lock in self.locks:
lock.acquire(*a, **k)
def release(self):
for lock in self.locks[::-1]: # Release in inverse order of acquisition
lock.release()
def __enter__(self):
self.acquire()
def __exit__(self, *exc):
self.release()
@classmethod
def create_mp_lock(cls):
if not hasattr(cls, 'mp_lock'):
try:
from multiprocessing import RLock
cls.mp_lock = RLock() # multiprocessing lock
except ImportError: # pragma: no cover
cls.mp_lock = None
except OSError: # pragma: no cover
cls.mp_lock = None
@classmethod
def create_th_lock(cls):
if not hasattr(cls, 'th_lock'):
try:
cls.th_lock = th.RLock() # thread lock
except OSError: # pragma: no cover
cls.th_lock = None
# Create a thread lock before instantiation so that no setup needs to be done
# before running in a multithreaded environment.
# Do not create the multiprocessing lock because it sets the multiprocessing
# context and does not allow the user to use 'spawn' or 'forkserver' methods.
TqdmDefaultWriteLock.create_th_lock()
class Bar(object):
"""
`str.format`-able bar with format specifiers: `[width][type]`
- `width`
+ unspecified (default): use `self.default_len`
+ `int >= 0`: overrides `self.default_len`
+ `int < 0`: subtract from `self.default_len`
- `type`
+ `a`: ascii (`charset=self.ASCII` override)
+ `u`: unicode (`charset=self.UTF` override)
+ `b`: blank (`charset=" "` override)
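Example (a sketch of how a format specifier is applied):
>>> bar = Bar(0.5, default_len=10)
>>> '{:a}'.format(bar)  # 10-character-wide ASCII bar, half full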
"""
ASCII = " 123456789#"
UTF = u" " + u''.join(map(_unich, range(0x258F, 0x2587, -1)))
BLANK = " "
def __init__(self, frac, default_len=10, charset=UTF):
if not (0 <= frac <= 1):
warn("clamping frac to range [0, 1]", TqdmWarning, stacklevel=2)
frac = max(0, min(1, frac))
assert default_len > 0
self.frac = frac
self.default_len = default_len
self.charset = charset
def __format__(self, format_spec):
if format_spec:
_type = format_spec[-1].lower()
try:
charset = dict(a=self.ASCII, u=self.UTF, b=self.BLANK)[_type]
except KeyError:
charset = self.charset
else:
format_spec = format_spec[:-1]
if format_spec:
N_BARS = int(format_spec)
if N_BARS < 0:
N_BARS += self.default_len
else:
N_BARS = self.default_len
else:
charset = self.charset
N_BARS = self.default_len
nsyms = len(charset) - 1
bar_length, frac_bar_length = divmod(
int(self.frac * N_BARS * nsyms), nsyms)
bar = charset[-1] * bar_length
frac_bar = charset[frac_bar_length]
# whitespace padding
if bar_length < N_BARS:
return bar + frac_bar + \
charset[0] * (N_BARS - bar_length - 1)
return bar
class tqdm(Comparable):
"""
Decorate an iterable object, returning an iterator which acts exactly
like the original iterable, but prints a dynamically updating
progressbar every time a value is requested.
"""
monitor_interval = 10 # set to 0 to disable the thread
monitor = None
@staticmethod
def format_sizeof(num, suffix='', divisor=1000):
"""
Formats a number (greater than unity) with SI Order of Magnitude
prefixes.
Parameters
----------
num : float
Number ( >= 1) to format.
suffix : str, optional
Post-postfix [default: ''].
divisor : float, optional
Divisor between prefixes [default: 1000].
Returns
-------
out : str
Number with Order of Magnitude SI unit postfix.
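        Examples
        --------
        Illustrative values:
        >>> tqdm.format_sizeof(123456)
        '123k'
        >>> tqdm.format_sizeof(123456, suffix='B', divisor=1024)
        '121kB'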
"""
for unit in ['', 'k', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 999.5:
if abs(num) < 99.95:
if abs(num) < 9.995:
return '{0:1.2f}'.format(num) + unit + suffix
return '{0:2.1f}'.format(num) + unit + suffix
return '{0:3.0f}'.format(num) + unit + suffix
num /= divisor
return '{0:3.1f}Y'.format(num) + suffix
@staticmethod
def format_interval(t):
"""
Formats a number of seconds as a clock time, [H:]MM:SS
Parameters
----------
t : int
Number of seconds.
Returns
-------
out : str
[H:]MM:SS
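        Examples
        --------
        Illustrative values:
        >>> tqdm.format_interval(75)
        '01:15'
        >>> tqdm.format_interval(3661)
        '1:01:01'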
"""
mins, s = divmod(int(t), 60)
h, m = divmod(mins, 60)
if h:
return '{0:d}:{1:02d}:{2:02d}'.format(h, m, s)
else:
return '{0:02d}:{1:02d}'.format(m, s)
@staticmethod
def format_num(n):
"""
Intelligent scientific notation (.3g).
Parameters
----------
n : int or float or Numeric
A Number.
Returns
-------
out : str
Formatted number.
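        Examples
        --------
        Illustrative values:
        >>> tqdm.format_num(123456789)
        '1.23e+8'
        >>> tqdm.format_num(42)
        '42'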
"""
f = '{0:.3g}'.format(n).replace('+0', '+').replace('-0', '-')
n = str(n)
return f if len(f) < len(n) else n
@staticmethod
def ema(x, mu=None, alpha=0.3):
"""
Exponential moving average: smoothing to give progressively lower
weights to older values.
Parameters
----------
x : float
New value to include in EMA.
mu : float, optional
Previous EMA value.
alpha : float, optional
Smoothing factor in range [0, 1], [default: 0.3].
Increase to give more weight to recent values.
Ranges from 0 (yields mu) to 1 (yields x).
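        Returns
        -------
        out  : float
            ``x`` if ``mu`` is None, else ``alpha * x + (1 - alpha) * mu``.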
"""
return x if mu is None else (alpha * x) + (1 - alpha) * mu
@staticmethod
def status_printer(file):
"""
Manage the printing and in-place updating of a line of characters.
Note that if the string is longer than a line, then in-place
updating may not work (it will print a new line at each refresh).
"""
fp = file
fp_flush = getattr(fp, 'flush', lambda: None) # pragma: no cover
def fp_write(s):
fp.write(_unicode(s))
fp_flush()
last_len = [0]
def print_status(s):
len_s = len(s)
fp_write('\r' + s + (' ' * max(last_len[0] - len_s, 0)))
last_len[0] = len_s
return print_status
@staticmethod
def format_meter(n, total, elapsed, ncols=None, prefix='', ascii=False,
unit='it', unit_scale=False, rate=None, bar_format=None,
postfix=None, unit_divisor=1000, **extra_kwargs):
"""
Return a string-based progress bar given some parameters
Parameters
----------
n : int or float
Number of finished iterations.
total : int or float
The expected total number of iterations. If meaningless (None),
only basic progress statistics are displayed (no ETA).
elapsed : float
Number of seconds passed since start.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes `{bar}` to stay within this bound
[default: None]. If `0`, will not print any bar (only stats).
The fallback is `{bar:10}`.
prefix : str, optional
Prefix message (included in total width) [default: ''].
Use as {desc} in bar_format string.
ascii : bool, optional or str, optional
If not set, use unicode (smooth blocks) to fill the meter
[default: False]. The fallback is to use ASCII characters
" 123456789#".
unit : str, optional
The iteration unit [default: 'it'].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be printed with an
appropriate SI metric prefix (k = 10^3, M = 10^6, etc.)
[default: False]. If any other non-zero number, will scale
`total` and `n`.
rate : float, optional
Manual override for iteration rate.
If [default: None], uses n/elapsed.
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
postfix : *, optional
Similar to `prefix`, but placed at the end
(e.g. for additional stats).
Note: postfix is usually a string (not a dict) for this method,
            and will, if possible, be set to postfix = ', ' + postfix.
However other types are supported (#382).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
Returns
-------
out : Formatted meter and stats, ready to display.
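        Example
        -------
        An illustrative call (output shown as a comment; the spacing comes
        from the default 10-character bar)::
            tqdm.format_meter(n=20, total=100, elapsed=12, ascii=True)
            # -> ' 20%|##        | 20/100 [00:12<00:48,  1.67it/s]'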
"""
# sanity check: total
if total and n >= (total + 0.5): # allow float imprecision (#849)
total = None
# apply custom scale if necessary
if unit_scale and unit_scale not in (True, 1):
if total:
total *= unit_scale
n *= unit_scale
if rate:
rate *= unit_scale # by default rate = 1 / self.avg_time
unit_scale = False
elapsed_str = tqdm.format_interval(elapsed)
# if unspecified, attempt to use rate = average speed
# (we allow manual override since predicting time is an arcane art)
if rate is None and elapsed:
rate = n / elapsed
inv_rate = 1 / rate if rate else None
format_sizeof = tqdm.format_sizeof
rate_noinv_fmt = ((format_sizeof(rate) if unit_scale else
'{0:5.2f}'.format(rate))
if rate else '?') + unit + '/s'
rate_inv_fmt = ((format_sizeof(inv_rate) if unit_scale else
'{0:5.2f}'.format(inv_rate))
if inv_rate else '?') + 's/' + unit
rate_fmt = rate_inv_fmt if inv_rate and inv_rate > 1 else rate_noinv_fmt
if unit_scale:
n_fmt = format_sizeof(n, divisor=unit_divisor)
total_fmt = format_sizeof(total, divisor=unit_divisor) \
if total is not None else '?'
else:
n_fmt = str(n)
total_fmt = str(total) if total is not None else '?'
try:
postfix = ', ' + postfix if postfix else ''
except TypeError:
pass
remaining = (total - n) / rate if rate and total else 0
remaining_str = tqdm.format_interval(remaining) if rate else '?'
# format the stats displayed to the left and right sides of the bar
if prefix:
            # workaround for the old prefix setup
bool_prefix_colon_already = (prefix[-2:] == ": ")
l_bar = prefix if bool_prefix_colon_already else prefix + ": "
else:
l_bar = ''
r_bar = '| {0}/{1} [{2}<{3}, {4}{5}]'.format(
n_fmt, total_fmt, elapsed_str, remaining_str, rate_fmt, postfix)
# Custom bar formatting
# Populate a dict with all available progress indicators
format_dict = dict(
# slight extension of self.format_dict
n=n, n_fmt=n_fmt, total=total, total_fmt=total_fmt,
elapsed=elapsed_str, elapsed_s=elapsed,
ncols=ncols, desc=prefix or '', unit=unit,
rate=inv_rate if inv_rate and inv_rate > 1 else rate,
rate_fmt=rate_fmt, rate_noinv=rate,
rate_noinv_fmt=rate_noinv_fmt, rate_inv=inv_rate,
rate_inv_fmt=rate_inv_fmt,
postfix=postfix, unit_divisor=unit_divisor,
# plus more useful definitions
remaining=remaining_str, remaining_s=remaining,
l_bar=l_bar, r_bar=r_bar,
**extra_kwargs)
# total is known: we can predict some stats
if total:
# fractional and percentage progress
frac = n / total
percentage = frac * 100
l_bar += '{0:3.0f}%|'.format(percentage)
if ncols == 0:
return l_bar[:-1] + r_bar[1:]
format_dict.update(l_bar=l_bar)
if bar_format:
format_dict.update(percentage=percentage)
# auto-remove colon for empty `desc`
if not prefix:
bar_format = bar_format.replace("{desc}: ", '')
else:
bar_format = "{l_bar}{bar}{r_bar}"
full_bar = FormatReplace()
try:
nobar = bar_format.format(bar=full_bar, **format_dict)
except UnicodeEncodeError:
bar_format = _unicode(bar_format)
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
# no {bar}, we can just format and return
return nobar
# Formatting progress bar space available for bar's display
full_bar = Bar(
frac,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.ASCII if ascii is True else ascii or Bar.UTF)
if not _is_ascii(full_bar.charset) and _is_ascii(bar_format):
bar_format = _unicode(bar_format)
return bar_format.format(bar=full_bar, **format_dict)
elif bar_format:
# user-specified bar_format but no total
l_bar += '|'
format_dict.update(l_bar=l_bar, percentage=0)
full_bar = FormatReplace()
nobar = bar_format.format(bar=full_bar, **format_dict)
if not full_bar.format_called:
return nobar
full_bar = Bar(
0,
max(1, ncols - _text_width(RE_ANSI.sub('', nobar)))
if ncols else 10,
charset=Bar.BLANK)
return bar_format.format(bar=full_bar, **format_dict)
else:
# no total: no progressbar, ETA, just progress stats
return ((prefix + ": ") if prefix else '') + \
'{0}{1} [{2}, {3}{4}]'.format(
n_fmt, unit, elapsed_str, rate_fmt, postfix)
def __new__(cls, *args, **kwargs):
# Create a new instance
instance = object.__new__(cls)
# Construct the lock if it does not exist
with cls.get_lock():
# Add to the list of instances
if not hasattr(cls, '_instances'):
cls._instances = WeakSet()
cls._instances.add(instance)
# Create the monitoring thread
if cls.monitor_interval and (cls.monitor is None or not
cls.monitor.report()):
try:
cls.monitor = TMonitor(cls, cls.monitor_interval)
except Exception as e: # pragma: nocover
warn("tqdm:disabling monitor support"
" (monitor_interval = 0) due to:\n" + str(e),
TqdmMonitorWarning, stacklevel=2)
cls.monitor_interval = 0
# Return the instance
return instance
@classmethod
def _get_free_pos(cls, instance=None):
"""Skips specified instance."""
positions = set(abs(inst.pos) for inst in cls._instances
if inst is not instance and hasattr(inst, "pos"))
return min(set(range(len(positions) + 1)).difference(positions))
@classmethod
def _decr_instances(cls, instance):
"""
Remove from list and reposition other bars
so that newer bars won't overlap previous bars
"""
with cls._lock:
try:
cls._instances.remove(instance)
except KeyError:
# if not instance.gui: # pragma: no cover
# raise
pass # py2: maybe magically removed already
# else:
if not instance.gui:
for inst in cls._instances:
# negative `pos` means fixed
if hasattr(inst, "pos") and inst.pos > abs(instance.pos):
inst.clear(nolock=True)
inst.pos -= 1
# TODO: check this doesn't overwrite another fixed bar
# Kill monitor if no instances are left
if not cls._instances and cls.monitor:
try:
cls.monitor.exit()
del cls.monitor
except AttributeError: # pragma: nocover
pass
else:
cls.monitor = None
@classmethod
def write(cls, s, file=None, end="\n", nolock=False):
"""Print a message via tqdm (without overlap with bars)."""
fp = file if file is not None else sys.stdout
with cls.external_write_mode(file=file, nolock=nolock):
# Write the message
fp.write(s)
fp.write(end)
@classmethod
@contextmanager
def external_write_mode(cls, file=None, nolock=False):
"""
        Disable tqdm within the context and refresh tqdm when it exits.
        Useful when writing to the standard output stream.
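        Example (illustrative)::
            >>> with tqdm.external_write_mode():
            ...     print("safe to print while bars are active")
            safe to print while bars are active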
"""
fp = file if file is not None else sys.stdout
if not nolock:
cls.get_lock().acquire()
# Clear all bars
inst_cleared = []
for inst in getattr(cls, '_instances', []):
# Clear instance if in the target output file
# or if write output + tqdm output are both either
# sys.stdout or sys.stderr (because both are mixed in terminal)
if hasattr(inst, "start_t") and (inst.fp == fp or all(
f in (sys.stdout, sys.stderr) for f in (fp, inst.fp))):
inst.clear(nolock=True)
inst_cleared.append(inst)
yield
# Force refresh display of bars we cleared
for inst in inst_cleared:
inst.refresh(nolock=True)
if not nolock:
cls._lock.release()
@classmethod
def set_lock(cls, lock):
"""Set the global lock."""
cls._lock = lock
@classmethod
def get_lock(cls):
"""Get the global lock. Construct it if it does not exist."""
if not hasattr(cls, '_lock'):
cls._lock = TqdmDefaultWriteLock()
return cls._lock
@classmethod
def pandas(tclass, *targs, **tkwargs):
"""
Registers the given `tqdm` class with
pandas.core.
( frame.DataFrame
| series.Series
| groupby.(generic.)DataFrameGroupBy
| groupby.(generic.)SeriesGroupBy
).progress_apply
        A new instance will be created every time `progress_apply` is called,
and each instance will automatically close() upon completion.
Parameters
----------
targs, tkwargs : arguments for the tqdm instance
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from tqdm import tqdm
>>> from tqdm.gui import tqdm as tqdm_gui
>>>
>>> df = pd.DataFrame(np.random.randint(0, 100, (100000, 6)))
>>> tqdm.pandas(ncols=50) # can use tqdm_gui, optional kwargs, etc
>>> # Now you can use `progress_apply` instead of `apply`
>>> df.groupby(0).progress_apply(lambda x: x**2)
References
----------
https://stackoverflow.com/questions/18603270/
progress-indicator-during-pandas-operations-python
"""
from pandas.core.frame import DataFrame
from pandas.core.series import Series
try:
from pandas import Panel
except ImportError: # TODO: pandas>0.25.2
Panel = None
try: # pandas>=0.18.0
from pandas.core.window import _Rolling_and_Expanding
except ImportError: # pragma: no cover
_Rolling_and_Expanding = None
try: # pandas>=0.25.0
from pandas.core.groupby.generic import DataFrameGroupBy, \
SeriesGroupBy # , NDFrameGroupBy
except ImportError:
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import DataFrameGroupBy, \
SeriesGroupBy
except ImportError:
from pandas.core.groupby import DataFrameGroupBy, \
SeriesGroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import GroupBy
except ImportError:
from pandas.core.groupby import GroupBy
try: # pandas>=0.23.0
from pandas.core.groupby.groupby import PanelGroupBy
except ImportError:
try:
from pandas.core.groupby import PanelGroupBy
except ImportError: # pandas>=0.25.0
PanelGroupBy = None
deprecated_t = [tkwargs.pop('deprecated_t', None)]
def inner_generator(df_function='apply'):
def inner(df, func, *args, **kwargs):
"""
Parameters
----------
df : (DataFrame|Series)[GroupBy]
Data (may be grouped).
func : function
To be applied on the (grouped) data.
**kwargs : optional
Transmitted to `df.apply()`.
"""
# Precompute total iterations
total = tkwargs.pop("total", getattr(df, 'ngroups', None))
if total is None: # not grouped
if df_function == 'applymap':
total = df.size
elif isinstance(df, Series):
total = len(df)
elif _Rolling_and_Expanding is None or \
not isinstance(df, _Rolling_and_Expanding):
# DataFrame or Panel
axis = kwargs.get('axis', 0)
if axis == 'index':
axis = 0
elif axis == 'columns':
axis = 1
                        # e.g. when axis=0, func is applied per column,
                        # so total = df.size // df.shape[0]
total = df.size // df.shape[axis]
# Init bar
if deprecated_t[0] is not None:
t = deprecated_t[0]
deprecated_t[0] = None
else:
t = tclass(*targs, total=total, **tkwargs)
if len(args) > 0:
# *args intentionally not supported (see #244, #299)
TqdmDeprecationWarning(
"Except func, normal arguments are intentionally" +
" not supported by" +
" `(DataFrame|Series|GroupBy).progress_apply`." +
" Use keyword arguments instead.",
fp_write=getattr(t.fp, 'write', sys.stderr.write))
try:
func = df._is_builtin_func(func)
except TypeError:
pass
# Define bar updating wrapper
def wrapper(*args, **kwargs):
# update tbar correctly
# it seems `pandas apply` calls `func` twice
# on the first column/row to decide whether it can
# take a fast or slow code path; so stop when t.total==t.n
t.update(n=1 if not t.total or t.n < t.total else 0)
return func(*args, **kwargs)
# Apply the provided function (in **kwargs)
# on the df using our wrapper (which provides bar updating)
result = getattr(df, df_function)(wrapper, **kwargs)
# Close bar and return pandas calculation result
t.close()
return result
return inner
# Monkeypatch pandas to provide easy methods
# Enable custom tqdm progress in pandas!
Series.progress_apply = inner_generator()
SeriesGroupBy.progress_apply = inner_generator()
Series.progress_map = inner_generator('map')
SeriesGroupBy.progress_map = inner_generator('map')
DataFrame.progress_apply = inner_generator()
DataFrameGroupBy.progress_apply = inner_generator()
DataFrame.progress_applymap = inner_generator('applymap')
if Panel is not None:
Panel.progress_apply = inner_generator()
if PanelGroupBy is not None:
PanelGroupBy.progress_apply = inner_generator()
GroupBy.progress_apply = inner_generator()
GroupBy.progress_aggregate = inner_generator('aggregate')
GroupBy.progress_transform = inner_generator('transform')
if _Rolling_and_Expanding is not None: # pragma: no cover
_Rolling_and_Expanding.progress_apply = inner_generator()
def __init__(self, iterable=None, desc=None, total=None, leave=True,
file=None, ncols=None, mininterval=0.1, maxinterval=10.0,
miniters=None, ascii=None, disable=False, unit='it',
unit_scale=False, dynamic_ncols=False, smoothing=0.3,
bar_format=None, initial=0, position=None, postfix=None,
unit_divisor=1000, write_bytes=None, lock_args=None,
gui=False, **kwargs):
"""
Parameters
----------
iterable : iterable, optional
Iterable to decorate with a progressbar.
Leave blank to manually manage the updates.
desc : str, optional
Prefix for the progressbar.
total : int or float, optional
The number of expected iterations. If unspecified,
len(iterable) is used if possible. If float("inf") or as a last
resort, only basic progress statistics are displayed
(no ETA, no progressbar).
If `gui` is True and this parameter needs subsequent updating,
specify an initial arbitrary large positive number,
e.g. 9e9.
leave : bool, optional
If [default: True], keeps all traces of the progressbar
upon termination of iteration.
If `None`, will leave only if `position` is `0`.
file : `io.TextIOWrapper` or `io.StringIO`, optional
Specifies where to output the progress messages
(default: sys.stderr). Uses `file.write(str)` and `file.flush()`
methods. For encoding, see `write_bytes`.
ncols : int, optional
The width of the entire output message. If specified,
dynamically resizes the progressbar to stay within this bound.
If unspecified, attempts to use environment width. The
fallback is a meter width of 10 and no limit for the counter and
statistics. If 0, will not print any meter (only stats).
mininterval : float, optional
Minimum progress display update interval [default: 0.1] seconds.
maxinterval : float, optional
Maximum progress display update interval [default: 10] seconds.
Automatically adjusts `miniters` to correspond to `mininterval`
after long display update lag. Only works if `dynamic_miniters`
or monitor thread is enabled.
miniters : int or float, optional
Minimum progress display update interval, in iterations.
If 0 and `dynamic_miniters`, will automatically adjust to equal
`mininterval` (more CPU efficient, good for tight loops).
If > 0, will skip display of specified number of iterations.
Tweak this and `mininterval` to get very efficient loops.
If your progress is erratic with both fast and slow iterations
(network, skipping items, etc) you should set miniters=1.
ascii : bool or str, optional
If unspecified or False, use unicode (smooth blocks) to fill
the meter. The fallback is to use ASCII characters " 123456789#".
disable : bool, optional
Whether to disable the entire progressbar wrapper
[default: False]. If set to None, disable on non-TTY.
unit : str, optional
String that will be used to define the unit of each iteration
[default: it].
unit_scale : bool or int or float, optional
If 1 or True, the number of iterations will be reduced/scaled
automatically and a metric prefix following the
International System of Units standard will be added
(kilo, mega, etc.) [default: False]. If any other non-zero
number, will scale `total` and `n`.
dynamic_ncols : bool, optional
If set, constantly alters `ncols` to the environment (allowing
for window resizes) [default: False].
smoothing : float, optional
Exponential moving average smoothing factor for speed estimates
(ignored in GUI mode). Ranges from 0 (average speed) to 1
(current/instantaneous speed) [default: 0.3].
bar_format : str, optional
Specify a custom bar string formatting. May impact performance.
[default: '{l_bar}{bar}{r_bar}'], where
l_bar='{desc}: {percentage:3.0f}%|' and
r_bar='| {n_fmt}/{total_fmt} [{elapsed}<{remaining}, '
'{rate_fmt}{postfix}]'
Possible vars: l_bar, bar, r_bar, n, n_fmt, total, total_fmt,
percentage, elapsed, elapsed_s, ncols, desc, unit,
rate, rate_fmt, rate_noinv, rate_noinv_fmt,
rate_inv, rate_inv_fmt, postfix, unit_divisor,
remaining, remaining_s.
Note that a trailing ": " is automatically removed after {desc}
if the latter is empty.
initial : int or float, optional
The initial counter value. Useful when restarting a progress
bar [default: 0]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
position : int, optional
Specify the line offset to print this bar (starting from 0)
Automatic if unspecified.
Useful to manage multiple bars at once (eg, from threads).
postfix : dict or *, optional
Specify additional stats to display at the end of the bar.
Calls `set_postfix(**postfix)` if possible (dict).
unit_divisor : float, optional
[default: 1000], ignored unless `unit_scale` is True.
write_bytes : bool, optional
If (default: None) and `file` is unspecified,
bytes will be written in Python 2. If `True` will also write
bytes. In all other cases will default to unicode.
lock_args : tuple, optional
Passed to `refresh` for intermediate output
(initialisation, iterating, and updating).
gui : bool, optional
WARNING: internal parameter - do not use.
Use tqdm.gui.tqdm(...) instead. If set, will attempt to use
matplotlib animations for a graphical output [default: False].
Returns
-------
out : decorated iterator.
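        Example
        -------
        A minimal illustrative loop (the bar is printed to ``sys.stderr``)::
            >>> from tqdm import tqdm
            >>> for _ in tqdm(range(100), desc='work', unit='step'):
            ...     pass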
"""
if write_bytes is None:
write_bytes = file is None and sys.version_info < (3,)
if file is None:
file = sys.stderr
if write_bytes:
# Despite coercing unicode into bytes, py2 sys.std* streams
# should have bytes written to them.
file = SimpleTextIOWrapper(
file, encoding=getattr(file, 'encoding', None) or 'utf-8')
if disable is None and hasattr(file, "isatty") and not file.isatty():
disable = True
if total is None and iterable is not None:
try:
total = len(iterable)
except (TypeError, AttributeError):
total = None
if total == float("inf"):
# Infinite iterations, behave same as unknown
total = None
if disable:
self.iterable = iterable
self.disable = disable
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
self.n = initial
self.total = total
return
if kwargs:
self.disable = True
with self._lock:
self.pos = self._get_free_pos(self)
self._instances.remove(self)
raise (
TqdmDeprecationWarning(
"`nested` is deprecated and automated.\n"
"Use `position` instead for manual control.\n",
fp_write=getattr(file, 'write', sys.stderr.write))
if "nested" in kwargs else
TqdmKeyError("Unknown argument(s): " + str(kwargs)))
# Preprocess the arguments
if ((ncols is None) and (file in (sys.stderr, sys.stdout))) or \
dynamic_ncols: # pragma: no cover
if dynamic_ncols:
dynamic_ncols = _environ_cols_wrapper()
if dynamic_ncols:
ncols = dynamic_ncols(file)
else:
_dynamic_ncols = _environ_cols_wrapper()
if _dynamic_ncols:
ncols = _dynamic_ncols(file)
if miniters is None:
miniters = 0
dynamic_miniters = True
else:
dynamic_miniters = False
if mininterval is None:
mininterval = 0
if maxinterval is None:
maxinterval = 0
if ascii is None:
ascii = not _supports_unicode(file)
if bar_format and not ((ascii is True) or _is_ascii(ascii)):
# Convert bar format into unicode since terminal uses unicode
bar_format = _unicode(bar_format)
if smoothing is None:
smoothing = 0
# Store the arguments
self.iterable = iterable
self.desc = desc or ''
self.total = total
self.leave = leave
self.fp = file
self.ncols = ncols
self.mininterval = mininterval
self.maxinterval = maxinterval
self.miniters = miniters
self.dynamic_miniters = dynamic_miniters
self.ascii = ascii
self.disable = disable
self.unit = unit
self.unit_scale = unit_scale
self.unit_divisor = unit_divisor
self.lock_args = lock_args
self.gui = gui
self.dynamic_ncols = dynamic_ncols
self.smoothing = smoothing
self.avg_time = None
self._time = time
self.bar_format = bar_format
self.postfix = None
if postfix:
try:
self.set_postfix(refresh=False, **postfix)
except TypeError:
self.postfix = postfix
# Init the iterations counters
self.last_print_n = initial
self.n = initial
# if nested, at initial sp() call we replace '\r' by '\n' to
# not overwrite the outer progress bar
with self._lock:
if position is None:
self.pos = self._get_free_pos(self)
else: # mark fixed positions as negative
self.pos = -position
if not gui:
# Initialize the screen printer
self.sp = self.status_printer(self.fp)
self.refresh(lock_args=self.lock_args)
# Init the time counter
self.last_print_t = self._time()
# NB: Avoid race conditions by setting start_t at the very end of init
self.start_t = self.last_print_t
def __bool__(self):
if self.total is not None:
return self.total > 0
if self.iterable is None:
raise TypeError('bool() undefined when iterable == total == None')
return bool(self.iterable)
def __nonzero__(self):
return self.__bool__()
def __len__(self):
return self.total if self.iterable is None else \
(self.iterable.shape[0] if hasattr(self.iterable, "shape")
else len(self.iterable) if hasattr(self.iterable, "__len__")
else getattr(self, "total", None))
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
try:
self.close()
except AttributeError:
# maybe eager thread cleanup upon external error
if (exc_type, exc_value, traceback) == (None, None, None):
raise
warn("AttributeError ignored", TqdmWarning, stacklevel=2)
def __del__(self):
self.close()
def __repr__(self):
return self.format_meter(**self.format_dict)
@property
def _comparable(self):
return abs(getattr(self, "pos", 1 << 31))
def __hash__(self):
return id(self)
def __iter__(self):
"""Backward-compatibility to use: for x in tqdm(iterable)"""
# Inlining instance variables as locals (speed optimisation)
iterable = self.iterable
# If the bar is disabled, then just walk the iterable
# (note: keep this check outside the loop for performance)
if self.disable:
for obj in iterable:
yield obj
return
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
if not hasattr(self, 'sp'):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)` instead of"
" `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
"""
Manually update the progress bar, useful for streams
such as reading files.
E.g.:
>>> t = tqdm(total=filesize) # Initialise
>>> for current_buffer in stream:
... ...
... t.update(len(current_buffer))
>>> t.close()
The last line is highly recommended, but possibly not necessary if
`t.update()` will be called in such a way that `filesize` will be
exactly reached and printed.
Parameters
----------
n : int or float, optional
Increment to add to the internal counter of iterations
[default: 1]. If using float, consider specifying `{n:.3f}`
or similar in `bar_format`, or specifying `unit_scale`.
"""
# N.B.: see __iter__() for more comments.
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
if not hasattr(self, "sp"):
raise TqdmDeprecationWarning(
"Please use `tqdm.gui.tqdm(...)`"
" instead of `tqdm(..., gui=True)`\n",
fp_write=getattr(self.fp, 'write', sys.stderr.write))
self.refresh(lock_args=self.lock_args)
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
"""Cleanup and (if leave=False) close the progressbar."""
if self.disable:
return
# Prevent multiple closures
self.disable = True
# decrement instance pos and remove from internal set
pos = abs(self.pos)
self._decr_instances(self)
# GUI mode
if not hasattr(self, "sp"):
return
# annoyingly, _supports_unicode isn't good enough
def fp_write(s):
self.fp.write(_unicode(s))
try:
fp_write('')
except ValueError as e:
if 'closed' in str(e):
return
raise # pragma: no cover
leave = pos == 0 if self.leave is None else self.leave
with self._lock:
if leave:
# stats for overall rate (no weighted average)
self.avg_time = None
self.display(pos=0)
fp_write('\n')
else:
self.display(msg='', pos=pos)
if not pos:
fp_write('\r')
def clear(self, nolock=False):
"""Clear current bar display."""
if self.disable:
return
if not nolock:
self._lock.acquire()
self.moveto(abs(self.pos))
self.sp('')
self.fp.write('\r') # place cursor back at the beginning of line
self.moveto(-abs(self.pos))
if not nolock:
self._lock.release()
def refresh(self, nolock=False, lock_args=None):
"""
Force refresh the display of this bar.
Parameters
----------
nolock : bool, optional
If `True`, does not lock.
If [default: `False`]: calls `acquire()` on internal lock.
lock_args : tuple, optional
Passed to internal lock's `acquire()`.
If specified, will only `display()` if `acquire()` returns `True`.
"""
if self.disable:
return
if not nolock:
if lock_args:
if not self._lock.acquire(*lock_args):
return False
else:
self._lock.acquire()
self.display()
if not nolock:
self._lock.release()
return True
def unpause(self):
"""Restart tqdm timer from last print time."""
cur_t = self._time()
self.start_t += cur_t - self.last_print_t
self.last_print_t = cur_t
def reset(self, total=None):
"""
Resets to 0 iterations for repeated use.
Consider combining with `leave=True`.
Parameters
----------
total : int or float, optional. Total to use for the new bar.
"""
self.last_print_n = self.n = 0
self.last_print_t = self.start_t = self._time()
if total is not None:
self.total = total
self.refresh()
def set_description(self, desc=None, refresh=True):
"""
Set/modify description of the progress bar.
Parameters
----------
desc : str, optional
refresh : bool, optional
Forces refresh [default: True].
"""
self.desc = desc + ': ' if desc else ''
if refresh:
self.refresh()
def set_description_str(self, desc=None, refresh=True):
"""Set/modify description without ': ' appended."""
self.desc = desc or ''
if refresh:
self.refresh()
def set_postfix(self, ordered_dict=None, refresh=True, **kwargs):
"""
Set/modify postfix (additional stats)
with automatic formatting based on datatype.
Parameters
----------
ordered_dict : dict or OrderedDict, optional
refresh : bool, optional
Forces refresh [default: True].
kwargs : dict, optional
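        Example
        -------
        Illustrative call (``disable=True`` only keeps the example silent):
        >>> t = tqdm(total=10, disable=True)
        >>> t.set_postfix(loss=0.1234, acc=98.7)
        >>> t.postfix
        'acc=98.7, loss=0.123'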
"""
# Sort in alphabetical order to be more deterministic
postfix = _OrderedDict([] if ordered_dict is None else ordered_dict)
for key in sorted(kwargs.keys()):
postfix[key] = kwargs[key]
# Preprocess stats according to datatype
for key in postfix.keys():
# Number: limit the length of the string
if isinstance(postfix[key], Number):
postfix[key] = self.format_num(postfix[key])
# Else for any other type, try to get the string conversion
elif not isinstance(postfix[key], _basestring):
postfix[key] = str(postfix[key])
# Else if it's a string, don't need to preprocess anything
# Stitch together to get the final postfix
self.postfix = ', '.join(key + '=' + postfix[key].strip()
for key in postfix.keys())
if refresh:
self.refresh()
def set_postfix_str(self, s='', refresh=True):
"""
Postfix without dictionary expansion, similar to prefix handling.
"""
self.postfix = str(s)
if refresh:
self.refresh()
def moveto(self, n):
# TODO: private method
self.fp.write(_unicode('\n' * n + _term_move_up() * -n))
self.fp.flush()
@property
def format_dict(self):
"""Public API for read-only member access."""
return dict(
n=self.n, total=self.total,
elapsed=self._time() - self.start_t
if hasattr(self, 'start_t') else 0,
ncols=self.dynamic_ncols(self.fp)
if self.dynamic_ncols else self.ncols,
prefix=self.desc, ascii=self.ascii, unit=self.unit,
unit_scale=self.unit_scale,
rate=1 / self.avg_time if self.avg_time else None,
bar_format=self.bar_format, postfix=self.postfix,
unit_divisor=self.unit_divisor)
def display(self, msg=None, pos=None):
"""
Use `self.sp` to display `msg` in the specified `pos`.
Consider overloading this function when inheriting to use e.g.:
`self.some_frontend(**self.format_dict)` instead of `self.sp`.
Parameters
----------
msg : str, optional. What to display (default: `repr(self)`).
pos : int, optional. Position to `moveto`
(default: `abs(self.pos)`).
"""
if pos is None:
pos = abs(self.pos)
if pos:
self.moveto(pos)
self.sp(self.__repr__() if msg is None else msg)
if pos:
self.moveto(-pos)
@classmethod
@contextmanager
def wrapattr(tclass, stream, method, total=None, bytes=True, **tkwargs):
"""
stream : file-like object.
method : str, "read" or "write". The result of `read()` and
the first argument of `write()` should have a `len()`.
>>> with tqdm.wrapattr(file_obj, "read", total=file_obj.size) as fobj:
... while True:
... chunk = fobj.read(chunk_size)
... if not chunk:
... break
"""
with tclass(total=total, **tkwargs) as t:
if bytes:
t.unit = "B"
t.unit_scale = True
t.unit_divisor = 1024
yield CallbackIOWrapper(t.update, stream, method)
def trange(*args, **kwargs):
"""
A shortcut for tqdm(xrange(*args), **kwargs).
On Python3+ range is used instead of xrange.
"""
return tqdm(_range(*args), **kwargs)
| bsd-3-clause |
LiaoPan/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
MSeifert04/astropy | astropy/visualization/scripts/fits2bitmap.py | 2 | 7795 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
from distutils.version import LooseVersion
from astropy.visualization.mpl_normalize import simple_norm
from astropy import log
from astropy.io.fits import getdata
def fits2bitmap(filename, ext=0, out_fn=None, stretch='linear',
power=1.0, asinh_a=0.1, min_cut=None, max_cut=None,
min_percent=None, max_percent=None, percent=None,
cmap='Greys_r'):
"""
Create a bitmap file from a FITS image, applying a stretching
transform between minimum and maximum cut levels and a matplotlib
colormap.
Parameters
----------
filename : str
The filename of the FITS file.
ext : int
FITS extension name or number of the image to convert. The
default is 0.
out_fn : str
The filename of the output bitmap image. The type of bitmap
is determined by the filename extension (e.g. '.jpg', '.png').
The default is a PNG file with the same name as the FITS file.
    stretch : {{'linear', 'sqrt', 'power', 'log', 'asinh'}}
The stretching function to apply to the image. The default is
'linear'.
power : float, optional
The power index for ``stretch='power'``. The default is 1.0.
asinh_a : float, optional
For ``stretch='asinh'``, the value where the asinh curve
transitions from linear to logarithmic behavior, expressed as a
fraction of the normalized image. Must be in the range between
0 and 1. The default is 0.1.
min_cut : float, optional
The pixel value of the minimum cut level. Data values less than
        ``min_cut`` will be set to ``min_cut`` before stretching the image.
The default is the image minimum. ``min_cut`` overrides
``min_percent``.
max_cut : float, optional
The pixel value of the maximum cut level. Data values greater
        than ``max_cut`` will be set to ``max_cut`` before stretching the
image. The default is the image maximum. ``max_cut`` overrides
``max_percent``.
min_percent : float, optional
The percentile value used to determine the pixel value of
minimum cut level. The default is 0.0. ``min_percent``
overrides ``percent``.
max_percent : float, optional
The percentile value used to determine the pixel value of
maximum cut level. The default is 100.0. ``max_percent``
overrides ``percent``.
percent : float, optional
The percentage of the image values used to determine the pixel
values of the minimum and maximum cut levels. The lower cut
level will set at the ``(100 - percent) / 2`` percentile, while
the upper cut level will be set at the ``(100 + percent) / 2``
percentile. The default is 100.0. ``percent`` is ignored if
either ``min_percent`` or ``max_percent`` is input.
cmap : str
The matplotlib color map name. The default is 'Greys_r'.
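    Examples
    --------
    Illustrative call (``image.fits`` is a placeholder filename)::
        >>> from astropy.visualization.scripts.fits2bitmap import fits2bitmap
        >>> fits2bitmap('image.fits', stretch='log', percent=99.0)  # doctest: +SKIP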
"""
import matplotlib
import matplotlib.cm as cm
import matplotlib.image as mimg
# __main__ gives ext as a string
try:
ext = int(ext)
except ValueError:
pass
try:
image = getdata(filename, ext)
except Exception as e:
log.critical(e)
return 1
if image.ndim != 2:
        log.critical('data in FITS extension {} is not a 2D array'
                     .format(ext))
        return 1
if out_fn is None:
out_fn = os.path.splitext(filename)[0]
if out_fn.endswith('.fits'):
out_fn = os.path.splitext(out_fn)[0]
out_fn += '.png'
# need to explicitly define the output format due to a bug in
# matplotlib (<= 2.1), otherwise the format will always be PNG
out_format = os.path.splitext(out_fn)[1][1:]
# workaround for matplotlib 2.0.0 bug where png images are inverted
# (mpl-#7656)
if (out_format.lower() == 'png' and
LooseVersion(matplotlib.__version__) == LooseVersion('2.0.0')):
image = image[::-1]
try:
cm.get_cmap(cmap)
except ValueError:
log.critical('{} is not a valid matplotlib colormap name.'
.format(cmap))
return 1
norm = simple_norm(image, stretch=stretch, power=power, asinh_a=asinh_a,
min_cut=min_cut, max_cut=max_cut,
min_percent=min_percent, max_percent=max_percent,
percent=percent)
mimg.imsave(out_fn, norm(image), cmap=cmap, origin='lower',
format=out_format)
log.info(f'Saved file to {out_fn}.')
def main(args=None):
import argparse
parser = argparse.ArgumentParser(
description='Create a bitmap file from a FITS image.')
parser.add_argument('-e', '--ext', metavar='hdu', default=0,
help='Specify the HDU extension number or name '
'(Default is 0).')
parser.add_argument('-o', metavar='filename', type=str, default=None,
help='Filename for the output image (Default is a '
'PNG file with the same name as the FITS file).')
parser.add_argument('--stretch', type=str, default='linear',
help='Type of image stretching ("linear", "sqrt", '
'"power", "log", or "asinh") (Default is "linear").')
parser.add_argument('--power', type=float, default=1.0,
help='Power index for "power" stretching (Default is '
'1.0).')
parser.add_argument('--asinh_a', type=float, default=0.1,
help='The value in normalized image where the asinh '
'curve transitions from linear to logarithmic '
'behavior (used only for "asinh" stretch) '
'(Default is 0.1).')
parser.add_argument('--min_cut', type=float, default=None,
help='The pixel value of the minimum cut level '
'(Default is the image minimum).')
parser.add_argument('--max_cut', type=float, default=None,
help='The pixel value of the maximum cut level '
'(Default is the image maximum).')
parser.add_argument('--min_percent', type=float, default=None,
help='The percentile value used to determine the '
'minimum cut level (Default is 0).')
parser.add_argument('--max_percent', type=float, default=None,
help='The percentile value used to determine the '
'maximum cut level (Default is 100).')
parser.add_argument('--percent', type=float, default=None,
help='The percentage of the image values used to '
'determine the pixel values of the minimum and '
'maximum cut levels (Default is 100).')
parser.add_argument('--cmap', metavar='colormap_name', type=str,
default='Greys_r', help='matplotlib color map name '
'(Default is "Greys_r").')
parser.add_argument('filename', nargs='+',
help='Path to one or more FITS files to convert')
args = parser.parse_args(args)
for filename in args.filename:
fits2bitmap(filename, ext=args.ext, out_fn=args.o,
stretch=args.stretch, min_cut=args.min_cut,
max_cut=args.max_cut, min_percent=args.min_percent,
max_percent=args.max_percent, percent=args.percent,
power=args.power, asinh_a=args.asinh_a, cmap=args.cmap)
| bsd-3-clause |
zhenv5/scikit-learn | sklearn/metrics/cluster/tests/test_unsupervised.py | 230 | 2823 | import numpy as np
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn.metrics.cluster.unsupervised import silhouette_score
from sklearn.metrics import pairwise_distances
from sklearn.utils.testing import assert_false, assert_almost_equal
from sklearn.utils.testing import assert_raises_regexp
def test_silhouette():
# Tests the Silhouette Coefficient.
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
D = pairwise_distances(X, metric='euclidean')
# Given that the actual labels are used, we can assume that S would be
# positive.
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
# Test without calculating D
silhouette_metric = silhouette_score(X, y, metric='euclidean')
assert_almost_equal(silhouette, silhouette_metric)
# Test with sampling
silhouette = silhouette_score(D, y, metric='precomputed',
sample_size=int(X.shape[0] / 2),
random_state=0)
silhouette_metric = silhouette_score(X, y, metric='euclidean',
sample_size=int(X.shape[0] / 2),
random_state=0)
assert(silhouette > 0)
assert(silhouette_metric > 0)
assert_almost_equal(silhouette_metric, silhouette)
# Test with sparse X
X_sparse = csr_matrix(X)
D = pairwise_distances(X_sparse, metric='euclidean')
silhouette = silhouette_score(D, y, metric='precomputed')
assert(silhouette > 0)
def test_no_nan():
# Assert Silhouette Coefficient != nan when there is 1 sample in a class.
# This tests for the condition that caused issue 960.
# Note that there is only one sample in cluster 0. This used to cause the
# silhouette_score to return nan (see bug #960).
labels = np.array([1, 0, 1, 1, 1])
# The distance matrix doesn't actually matter.
D = np.random.RandomState(0).rand(len(labels), len(labels))
silhouette = silhouette_score(D, labels, metric='precomputed')
assert_false(np.isnan(silhouette))
def test_correct_labelsize():
# Assert 1 < n_labels < n_samples
dataset = datasets.load_iris()
X = dataset.data
# n_labels = n_samples
y = np.arange(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
# n_labels = 1
y = np.zeros(X.shape[0])
assert_raises_regexp(ValueError,
'Number of labels is %d\. Valid values are 2 '
'to n_samples - 1 \(inclusive\)' % len(np.unique(y)),
silhouette_score, X, y)
| bsd-3-clause |
ctherien/pysptools | pysptools/skl/km.py | 1 | 4885 | #
#------------------------------------------------------------------------------
# Copyright (c) 2013-2014, Christian Therien
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#------------------------------------------------------------------------------
#
# km.py - This file is part of the PySptools package.
#
"""
KMeans class
"""
import numpy as np
import sklearn.cluster as cluster
#from . import out
#from .inval import *
from pysptools.classification.out import Output
from pysptools.classification.inval import *
class KMeans(object):
""" KMeans clustering algorithm adapted to hyperspectral imaging """
def __init__(self):
self.cluster = None
self.n_clusters = None
self.output = Output('KMeans')
@PredictInputValidation('KMeans')
def predict(self, M, n_clusters=5, n_jobs=1, init='k-means++'):
"""
KMeans clustering algorithm adapted to hyperspectral imaging.
It is a simple wrapper to the scikit-learn version.
Parameters:
M: `numpy array`
A HSI cube (m x n x p).
n_clusters: `int [default 5]`
The number of clusters to generate.
n_jobs: `int [default 1]`
Taken from scikit-learn doc:
                The number of jobs to use for the computation. This works
                by breaking down the pairwise matrix into n_jobs even
                slices and computing them in parallel.
                If -1 all CPUs are used. If 1 is given, no parallel
                computing code is used at all, which is useful for
                debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are
                used. Thus for n_jobs = -2, all CPUs but one are used.
init: `string or array [default 'k-means++']`
Taken from scikit-learn doc: Method for initialization, defaults to `k-means++`:
                `k-means++` : selects initial cluster centers for k-means
                clustering in a smart way to speed up convergence. See
                section Notes in k_init for more details.
                `random`: choose k observations (rows) at random from data
                for the initial centroids.
                If an ndarray is passed, it should be of shape
                (n_clusters, n_features) and gives the initial centers.
Returns: `numpy array`
              A cluster map (m x n), each pixel holding the index of its cluster.
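        Example (illustrative; ``M`` is an already-loaded HSI cube and the
        import path follows this file's location)::
            from pysptools.skl.km import KMeans
            km = KMeans()
            cmap = km.predict(M, n_clusters=4)
            km.display()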
"""
h, w, numBands = M.shape
self.n_clusters = n_clusters
X = np.reshape(M, (w*h, numBands))
clf = cluster.KMeans(n_clusters=n_clusters, n_jobs=n_jobs, init=init)
cls = clf.fit_predict(X)
self.cluster = np.reshape(cls, (h, w))
return self.cluster
@PlotInputValidation3('KMeans')
def plot(self, path, interpolation='none', colorMap='Accent', suffix=None):
"""
Plot the cluster map.
Parameters:
path: `string`
The path where to put the plot.
interpolation: `string [default none]`
A matplotlib interpolation method.
colorMap: `string [default 'Accent']`
A color map element of
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
"Accent" is the default and it fall back on "Jet".
suffix: `string [default None]`
Add a suffix to the file name.
"""
self.output.plot(self.cluster, self.n_clusters, path=path, interpolation=interpolation, colorMap=colorMap, suffix=suffix)
@DisplayInputValidation3('KMeans')
def display(self, interpolation='none', colorMap='Accent', suffix=None):
"""
Display the cluster map.
Parameters:
path: `string`
The path where to put the plot.
interpolation: `string [default none]`
A matplotlib interpolation method.
colorMap: `string [default 'Accent']`
A color map element of
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1', 'Set2', 'Set3'],
"Accent" is the default and it fall back on "Jet".
suffix: `string [default None]`
Add a suffix to the title.
"""
self.output.plot(self.cluster, self.n_clusters, interpolation=interpolation, colorMap=colorMap, suffix=suffix)
| apache-2.0 |
zhenv5/scikit-learn | sklearn/decomposition/__init__.py | 147 | 1421 | """
The :mod:`sklearn.decomposition` module includes matrix decomposition
algorithms, including among others PCA, NMF or ICA. Most of the algorithms of
this module can be regarded as dimensionality reduction techniques.
"""
from .nmf import NMF, ProjectedGradientNMF
from .pca import PCA, RandomizedPCA
from .incremental_pca import IncrementalPCA
from .kernel_pca import KernelPCA
from .sparse_pca import SparsePCA, MiniBatchSparsePCA
from .truncated_svd import TruncatedSVD
from .fastica_ import FastICA, fastica
from .dict_learning import (dict_learning, dict_learning_online, sparse_encode,
DictionaryLearning, MiniBatchDictionaryLearning,
SparseCoder)
from .factor_analysis import FactorAnalysis
from ..utils.extmath import randomized_svd
from .online_lda import LatentDirichletAllocation
__all__ = ['DictionaryLearning',
'FastICA',
'IncrementalPCA',
'KernelPCA',
'MiniBatchDictionaryLearning',
'MiniBatchSparsePCA',
'NMF',
'PCA',
'ProjectedGradientNMF',
'RandomizedPCA',
'SparseCoder',
'SparsePCA',
'dict_learning',
'dict_learning_online',
'fastica',
'randomized_svd',
'sparse_encode',
'FactorAnalysis',
'TruncatedSVD',
'LatentDirichletAllocation']
| bsd-3-clause |
huzq/scikit-learn | sklearn/neighbors/_nearest_centroid.py | 4 | 7789 | # -*- coding: utf-8 -*-
"""
Nearest Centroid Classification
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
#
# License: BSD 3 clause
import warnings
import numpy as np
from scipy import sparse as sp
from ..base import BaseEstimator, ClassifierMixin
from ..metrics.pairwise import pairwise_distances
from ..preprocessing import LabelEncoder
from ..utils.validation import check_array, check_is_fitted
from ..utils.validation import _deprecate_positional_args
from ..utils.sparsefuncs import csc_median_axis_0
from ..utils.multiclass import check_classification_targets
class NearestCentroid(ClassifierMixin, BaseEstimator):
"""Nearest centroid classifier.
Each class is represented by its centroid, with test samples classified to
the class with the nearest centroid.
Read more in the :ref:`User Guide <nearest_centroid_classifier>`.
Parameters
----------
metric : str or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
The centroids for the samples corresponding to each class is the point
from which the sum of the distances (according to the metric) of all
samples that belong to that particular class are minimized.
If the "manhattan" metric is provided, this centroid is the median and
for all other metrics, the centroid is now set to be the mean.
.. versionchanged:: 0.19
``metric='precomputed'`` was deprecated and now raises an error
shrink_threshold : float, default=None
Threshold for shrinking centroids to remove features.
Attributes
----------
centroids_ : array-like of shape (n_classes, n_features)
Centroid of each class.
classes_ : array of shape (n_classes,)
The unique classes labels.
Examples
--------
>>> from sklearn.neighbors import NearestCentroid
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> y = np.array([1, 1, 1, 2, 2, 2])
>>> clf = NearestCentroid()
>>> clf.fit(X, y)
NearestCentroid()
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
sklearn.neighbors.KNeighborsClassifier: nearest neighbors classifier
Notes
-----
When used for text classification with tf-idf vectors, this classifier is
also known as the Rocchio classifier.
References
----------
Tibshirani, R., Hastie, T., Narasimhan, B., & Chu, G. (2002). Diagnosis of
multiple cancer types by shrunken centroids of gene expression. Proceedings
of the National Academy of Sciences of the United States of America,
99(10), 6567-6572. The National Academy of Sciences.
"""
@_deprecate_positional_args
def __init__(self, metric='euclidean', *, shrink_threshold=None):
self.metric = metric
self.shrink_threshold = shrink_threshold
def fit(self, X, y):
"""
Fit the NearestCentroid model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
Note that centroid shrinking cannot be used with sparse matrices.
y : array-like of shape (n_samples,)
Target values (integers)
"""
if self.metric == 'precomputed':
raise ValueError("Precomputed is not supported.")
        # If X is sparse and the metric is "manhattan", store it in csc
        # format, as that makes it easier to calculate the median.
if self.metric == 'manhattan':
X, y = self._validate_data(X, y, accept_sparse=['csc'])
else:
X, y = self._validate_data(X, y, accept_sparse=['csr', 'csc'])
is_X_sparse = sp.issparse(X)
if is_X_sparse and self.shrink_threshold:
raise ValueError("threshold shrinking not supported"
" for sparse input")
check_classification_targets(y)
n_samples, n_features = X.shape
le = LabelEncoder()
y_ind = le.fit_transform(y)
self.classes_ = classes = le.classes_
n_classes = classes.size
if n_classes < 2:
raise ValueError('The number of classes has to be greater than'
' one; got %d class' % (n_classes))
# Mask mapping each class to its members.
self.centroids_ = np.empty((n_classes, n_features), dtype=np.float64)
# Number of clusters in each class.
nk = np.zeros(n_classes)
for cur_class in range(n_classes):
center_mask = y_ind == cur_class
nk[cur_class] = np.sum(center_mask)
if is_X_sparse:
center_mask = np.where(center_mask)[0]
# XXX: Update other averaging methods according to the metrics.
if self.metric == "manhattan":
# NumPy does not calculate median of sparse matrices.
if not is_X_sparse:
self.centroids_[cur_class] = np.median(X[center_mask], axis=0)
else:
self.centroids_[cur_class] = csc_median_axis_0(X[center_mask])
else:
if self.metric != 'euclidean':
warnings.warn("Averaging for metrics other than "
"euclidean and manhattan not supported. "
"The average is set to be the mean."
)
self.centroids_[cur_class] = X[center_mask].mean(axis=0)
if self.shrink_threshold:
dataset_centroid_ = np.mean(X, axis=0)
# m parameter for determining deviation
m = np.sqrt((1. / nk) - (1. / n_samples))
# Calculate deviation using the standard deviation of centroids.
variance = (X - self.centroids_[y_ind]) ** 2
variance = variance.sum(axis=0)
s = np.sqrt(variance / (n_samples - n_classes))
s += np.median(s) # To deter outliers from affecting the results.
mm = m.reshape(len(m), 1) # Reshape to allow broadcasting.
ms = mm * s
deviation = ((self.centroids_ - dataset_centroid_) / ms)
# Soft thresholding: if the deviation crosses 0 during shrinking,
# it becomes zero.
signs = np.sign(deviation)
deviation = (np.abs(deviation) - self.shrink_threshold)
np.clip(deviation, 0, None, out=deviation)
deviation *= signs
# Now adjust the centroids using the deviation
msd = ms * deviation
self.centroids_ = dataset_centroid_[np.newaxis, :] + msd
return self
def predict(self, X):
"""Perform classification on an array of test vectors X.
The predicted class C for each sample in X is returned.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Returns
-------
C : ndarray of shape (n_samples,)
Notes
-----
If the metric constructor parameter is "precomputed", X is assumed to
be the distance matrix between the data to be predicted and
``self.centroids_``.
"""
check_is_fitted(self)
X = check_array(X, accept_sparse='csr')
return self.classes_[pairwise_distances(
X, self.centroids_, metric=self.metric).argmin(axis=1)]
| bsd-3-clause |
theislab/dca | dca/utils.py | 1 | 4911 | import scanpy as sc
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import scipy as sp
import tensorflow as tf
from tensorflow.contrib.opt import ScipyOptimizerInterface
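# Descriptive note on the helper lambdas below: nb_zero(t, mu) is the
# zero-count probability of a negative binomial with dispersion t (theta) and
# mean mu; zinb_zero adds a zero-inflation (dropout) probability p on top of
# that NB zero mass.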
nb_zero = lambda t, mu: (t/(mu+t))**t
zinb_zero = lambda t, mu, p: p + ((1.-p)*((t/(mu+t))**t))
sigmoid = lambda x: 1. / (1.+np.exp(-x))
logit = lambda x: np.log(x + 1e-7) - np.log(1. - x + 1e-7)
tf_logit = lambda x: tf.cast(tf.log(x + 1e-7) - tf.log(1. - x + 1e-7), 'float32')
log_loss = lambda pred, label: np.sum(-(label*np.log(pred+1e-7)) - ((1.-label)*np.log(1.-pred+1e-7)))
def _lrt(ll_full, ll_reduced, df_full, df_reduced):
# Compute the difference in degrees of freedom.
delta_df = df_full - df_reduced
# Compute the deviance test statistic.
delta_dev = 2 * (ll_full - ll_reduced)
    # Compute the p-values from the deviance using its expected chi-square distribution.
pvals = 1. - sp.stats.chi2(delta_df).cdf(delta_dev)
return pvals
def _fitquad(x, y):
coef, res, _, _ = np.linalg.lstsq((x**2)[:, np.newaxis] , y-x, rcond=None)
ss_exp = res[0]
ss_tot = (np.var(y-x)*len(x))
r2 = 1 - (ss_exp / ss_tot)
#print('Coefs:', coef)
return np.array([coef[0], 1, 0]), r2
def _tf_zinb_zero(mu, t=None):
a, b = tf.Variable([-1.0], dtype='float32'), tf.Variable([0.0], dtype='float32')
if t is None:
t_log = tf.Variable([-10.], dtype='float32')
t = tf.exp(t_log)
p = tf.sigmoid((tf.log(mu+1e-7)*a) + b)
pred = p + ((1.-p)*((t/(mu+t))**t))
pred = tf.cast(pred, 'float32')
return pred, a, b, t
def _optimize_zinb(mu, dropout, theta=None):
pred, a, b, t = _tf_zinb_zero(mu, theta)
#loss = tf.reduce_mean(tf.abs(tf_logit(pred) - tf_logit(dropout)))
loss = tf.losses.log_loss(labels=dropout.astype('float32'),
predictions=pred)
optimizer = ScipyOptimizerInterface(loss, options={'maxiter': 100})
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
optimizer.minimize(sess)
ret_a = sess.run(a)
ret_b = sess.run(b)
if theta is None:
ret_t = sess.run(t)
else:
ret_t = t
return ret_a, ret_b, ret_t
def plot_mean_dropout(ad, title, ax, opt_zinb_theta=False, legend_out=False):
expr = ad.X
mu = expr.mean(0)
do = np.mean(expr == 0, 0)
v = expr.var(axis=0)
coefs, r2 = _fitquad(mu, v)
theta = 1.0/coefs[0]
# zinb fit
coefs = _optimize_zinb(mu, do, theta=theta if not opt_zinb_theta else None)
print(coefs)
#pois_pred = np.exp(-mu)
nb_pred = nb_zero(theta, mu)
zinb_pred = zinb_zero(coefs[2],
mu,
sigmoid((np.log(mu+1e-7)*coefs[0])+coefs[1]))
# calculate log loss for all distr.
#pois_ll = log_loss(pois_pred, do)
nb_ll = log_loss(nb_pred, do)
zinb_ll = log_loss(zinb_pred, do)
ax.plot(mu, do, 'o', c='black', markersize=1)
ax.set(xscale="log")
#sns.lineplot(mu, pois_pred, ax=ax, color='blue')
sns.lineplot(mu, nb_pred, ax=ax, color='red')
sns.lineplot(mu, zinb_pred, ax=ax, color='green')
ax.set_title(title)
ax.set_ylabel('Empirical dropout rate')
ax.set_xlabel(r'Mean expression')
leg_loc = 'best' if not legend_out else 'upper left'
leg_bbox = None if not legend_out else (1.02, 1.)
ax.legend(['Genes',
#r'Poisson $L=%.4f$' % pois_ll,
r'NB($\theta=%.2f)\ L=%.4f$' % ((1./theta), nb_ll),
r'ZINB($\theta=%.2f,\pi=\sigma(%.2f\mu%+.2f)) \ L=%.4f$' % (1.0/coefs[2], coefs[0], coefs[1], zinb_ll)],
loc=leg_loc, bbox_to_anchor=leg_bbox)
zinb_pval = _lrt(-zinb_ll, -nb_ll, 3, 1)
print('p-value: %e' % zinb_pval)
def plot_mean_var(ad, title, ax):
ad = ad.copy()
sc.pp.filter_cells(ad, min_counts=1)
sc.pp.filter_genes(ad, min_counts=1)
m = ad.X.mean(axis=0)
v = ad.X.var(axis=0)
coefs, r2 = _fitquad(m, v)
ax.set(xscale="log", yscale="log")
ax.plot(m, v, 'o', c='black', markersize=1)
poly = np.poly1d(coefs)
sns.lineplot(m, poly(m), ax=ax, color='red')
ax.set_title(title)
ax.set_ylabel('Variance')
ax.set_xlabel(r'$\mu$')
sns.lineplot(m, m, ax=ax, color='blue')
ax.legend(['Genes', r'NB ($\theta=%.2f)\ r^2=%.3f$' % (coefs[0], r2), 'Poisson'])
return coefs[0]
def plot_zeroinf(ad, title, mean_var_plot=False, opt_theta=True):
if mean_var_plot:
f, axs = plt.subplots(1, 2, figsize=(15, 5))
plot_mean_var(ad, title, ax=axs[0])
plot_mean_dropout(ad, title, axs[1], opt_zinb_theta=opt_theta, legend_out=True)
plt.tight_layout()
else:
f, ax = plt.subplots(1, 1, figsize=(10, 5))
plot_mean_dropout(ad, title, ax, opt_zinb_theta=opt_theta, legend_out=True)
plt.tight_layout()
| apache-2.0 |
BNIA/tidyall | src/modules/tidydate.py | 2 | 2523 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""tidydate
This file declares and implements the TidyDate class for TidyAll.
TidyDate converts and formats valid date columns into ISO 8601 (yyyy-mm-dd).
"""
import sys
from dateutil import parser as date_parser
import numpy as np
import pandas as pd
from .settings import TIDY_DATE_SPLIT
class TidyDate(object):
def __init__(self, df, column):
"""Wraps a TidyDate object around a TidyAll dataframe"""
self.df = df
self.date_col = column
@staticmethod
def parse_date(date_str):
try:
return date_parser.parse(date_str)
except (TypeError, ValueError):
try:
def split_date():
return (date_str[-4:], date_str[:-6], date_str[-6:-4])
return date_parser.parse('-'.join(split_date()))
except (TypeError, ValueError):
return None
def __clean_col(self):
"""Parses and standardizes the selected column values
Args:
None
Returns:
None
"""
if self.date_col:
if np.dtype(self.df[self.date_col]) == np.dtype("datetime64[ns]"):
sys.exit("Column is already in date format")
self.df["tidy_date"] = self.df[self.date_col].apply(
lambda x: self.parse_date(str(x))
)
self.df["tidy_date"] = self.df["tidy_date"].apply(
lambda x: x.date() if pd.notnull(x) else x
)
def __split_date(self):
"""Splits the "tidy_date" column into separate tidy year, month and
day columns
Args:
None
Returns:
None
"""
for index, col in enumerate(TIDY_DATE_SPLIT):
try:
self.df[col] = self.df["tidy_date"].apply(
lambda x: int(str(x).split("-")[index])
if pd.notnull(x) else x
)
except IndexError:
continue
def __fill_na(self):
"""Fills values that were unable to be parsed with the original values
Args:
None
Returns:
None
"""
TIDY_DATE_SPLIT.append("tidy_date")
for col in TIDY_DATE_SPLIT:
self.df[col].fillna(self.df[self.date_col], inplace=True)
def parse(self):
self.__clean_col()
self.__split_date()
self.__fill_na()
return self.df
| mit |
JeanKossaifi/scikit-learn | benchmarks/bench_plot_nmf.py | 90 | 5742 | """
Benchmarks of Non-Negative Matrix Factorization
"""
from __future__ import print_function
from collections import defaultdict
import gc
from time import time
import numpy as np
from scipy.linalg import norm
from sklearn.decomposition.nmf import NMF, _initialize_nmf
from sklearn.datasets.samples_generator import make_low_rank_matrix
from sklearn.externals.six.moves import xrange
def alt_nnmf(V, r, max_iter=1000, tol=1e-3, init='random'):
'''
A, S = nnmf(X, r, tol=1e-3, R=None)
Implement Lee & Seung's algorithm
Parameters
----------
V : 2-ndarray, [n_samples, n_features]
input matrix
r : integer
number of latent features
max_iter : integer, optional
maximum number of iterations (default: 1000)
tol : double
tolerance threshold for early exit (when the update factor is within
tol of 1., the function exits)
init : string
Method used to initialize the procedure.
Returns
-------
A : 2-ndarray, [n_samples, r]
Component part of the factorization
S : 2-ndarray, [r, n_features]
Data part of the factorization
Reference
---------
"Algorithms for Non-negative Matrix Factorization"
by Daniel D Lee, Sebastian H Seung
(available at http://citeseer.ist.psu.edu/lee01algorithms.html)
'''
# Nomenclature in the function follows Lee & Seung
eps = 1e-5
n, m = V.shape
W, H = _initialize_nmf(V, r, init, random_state=0)
for i in xrange(max_iter):
updateH = np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
H *= updateH
updateW = np.dot(V, H.T) / (np.dot(W, np.dot(H, H.T)) + eps)
W *= updateW
if i % 10 == 0:
max_update = max(updateW.max(), updateH.max())
if abs(1. - max_update) < tol:
break
return W, H
def report(error, time):
print("Frobenius loss: %.5f" % error)
print("Took: %.2fs" % time)
print()
def benchmark(samples_range, features_range, rank=50, tolerance=1e-5):
timeset = defaultdict(lambda: [])
err = defaultdict(lambda: [])
for n_samples in samples_range:
for n_features in features_range:
print("%2d samples, %2d features" % (n_samples, n_features))
print('=======================')
X = np.abs(make_low_rank_matrix(n_samples, n_features,
effective_rank=rank, tail_strength=0.2))
gc.collect()
print("benchmarking nndsvd-nmf: ")
tstart = time()
m = NMF(n_components=30, tol=tolerance, init='nndsvd').fit(X)
tend = time() - tstart
timeset['nndsvd-nmf'].append(tend)
err['nndsvd-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvda-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvda',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvda-nmf'].append(tend)
err['nndsvda-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking nndsvdar-nmf: ")
tstart = time()
m = NMF(n_components=30, init='nndsvdar',
tol=tolerance).fit(X)
tend = time() - tstart
timeset['nndsvdar-nmf'].append(tend)
err['nndsvdar-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking random-nmf")
tstart = time()
m = NMF(n_components=30, init='random', max_iter=1000,
tol=tolerance).fit(X)
tend = time() - tstart
timeset['random-nmf'].append(tend)
err['random-nmf'].append(m.reconstruction_err_)
report(m.reconstruction_err_, tend)
gc.collect()
print("benchmarking alt-random-nmf")
tstart = time()
W, H = alt_nnmf(X, r=30, init='random', tol=tolerance)
tend = time() - tstart
timeset['alt-random-nmf'].append(tend)
err['alt-random-nmf'].append(np.linalg.norm(X - np.dot(W, H)))
report(norm(X - np.dot(W, H)), tend)
return timeset, err
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
axes3d
import matplotlib.pyplot as plt
samples_range = np.linspace(50, 500, 3).astype(np.int)
features_range = np.linspace(50, 500, 3).astype(np.int)
timeset, err = benchmark(samples_range, features_range)
for i, results in enumerate((timeset, err)):
fig = plt.figure('scikit-learn Non-Negative Matrix Factorization'
'benchmark results')
ax = fig.gca(projection='3d')
for c, (label, timings) in zip('rbgcm', sorted(results.iteritems())):
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z, rstride=8, cstride=8, alpha=0.3,
color=c)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
zlabel = 'Time (s)' if i == 0 else 'reconstruction error'
ax.set_zlabel(zlabel)
ax.legend()
plt.show()
| bsd-3-clause |
cheza/ActivityDynamics | lib/network.py | 1 | 23860 | from __future__ import division
from time import sleep
from lib.util import *
from config import config
import math
import random
import numpy as np
from numpy.lib.type_check import real, imag
import datetime
from graph_tool.all import *
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from random import shuffle, sample
from scipy.sparse.linalg.eigen.arpack import eigsh as largest_eigsh
import sys
class Network:
# Create Network Object with default values
# @params:
# directed = Is graph directed?
# graph_name = Name of graph, mainly used for storing plots and figures
# run = Number of random initializations for activity weights
# percentage = Percentage of nodes to randomly select for increasing/decreasing ratio
# converge_at = Minimum difference that has to be reached between two iterations to declare convergence
# ratios = The ratio for the activity dynamics.
# deltatau = The time that each iteration "represents".
# debug_level = The level of debug messages to be displayed.
# store_iterations = The interval for storing iterations (1 = all, 2 = every other, etc.)
def __init__(self, directed, graph_name, run=1, converge_at=1e-16, deltatau=0.01, runs = 1,
deltapsi=0.0001, debug_level=1, store_iterations=1, ratios=[], ratio_index = 0, tau_in_days=30,
num_nodes=None):
# default variables
self.name_to_id = {}
self.graph = Graph(directed=directed)
# variables used to store and load files
self.graph_name = graph_name
self.run = run
self.runs = runs
self.num_nodes = num_nodes
# variables used for activity dynamics modeling process
self.cur_iteration = 0
self.ratio_ones = [1.0] * self.graph.num_vertices()
self.deltatau = deltatau
self.deltapsi = deltapsi
self.tau_in_days = tau_in_days
self.converge_at = float(converge_at)
self.store_iterations = store_iterations
self.ratio = None
# variables used to specifically increase the ratio for certain nodes
self.random_nodes = []
# variable needed for adding and removing edges from graph
self.edge_list = None
# variable storing the eigenvalues for the network
self.top_eigenvalues = None
self.debug_level = debug_level
# empirical network variables
self.ratio_index = ratio_index
self.ratios = ratios
self.minimized_error_ratios = []
# synthetic network helper variables
self.converged = False
self.diverged = False
def calc_acs(self, ac_per_taus=None, min_ac=None):
if ac_per_taus is None:
self.a_cs = [max((np.mean(self.replies) + np.mean(self.posts)) / self.num_vertices, min_ac)] * (len(self.replies)-1)
else:
for i in xrange(len(self.replies)-ac_per_taus):
j = i + ac_per_taus
curr_ac = (np.mean(self.replies[i:j]) + np.mean(self.posts[i:j])) / self.num_vertices
for k in xrange(i+ac_per_taus):
self.a_cs.append(curr_ac)
self.set_ac(0)
def set_ac(self, index):
self.a_c = self.a_cs[index]
#def calc_ac(self, start_tau=0, end_tau=None, min_ac=40):
# replies = self.replies[start_tau:end_tau]
# posts = self.posts[start_tau:end_tau]
# return max((np.mean(replies) + np.mean(posts)) / self.num_vertices, min_ac)
def calc_max_posts_per_day(self, start_tau=0, end_tau=None):
return max(self.posts_per_user_per_day[start_tau:end_tau])
def calc_g_per_month(self):
return self.max_posts_per_day / (math.sqrt(self.a_c ** 2 + self.max_posts_per_day ** 2))
def calc_max_q(self):
return (self.max_posts_per_day * self.tau_in_days * self.num_vertices) / (2 * self.num_edges * self.g_per_month)
def get_empirical_input(self, path, start_tau=0, end_tau=None, ac_per_taus=None):
self.dx = []
self.apm = []
self.posts = []
self.replies = []
self.num_users = []
self.init_users = []
self.posts_per_user_per_day = []
self.a_cs = []
f = open(path, "rb")
for ldx, line in enumerate(f):
if ldx < 1:
continue
el = line.strip().split("\t")
try:
self.dx.append(float(el[1]))
except:
break
self.apm.append(float(el[2]))
self.posts.append(float(el[3]))
self.replies.append(float(el[4]))
try:
self.init_users.append(el[6].split(","))
except:
self.init_users.append(["dummy"])
num_users = float(el[5]) + 1
self.num_users.append(num_users)
self.posts_per_user_per_day.append(float(el[3])/num_users/30.0)
f.close()
self.calc_acs(ac_per_taus)
self.max_posts_per_day = self.calc_max_posts_per_day(start_tau, end_tau)
self.g_per_month = self.calc_g_per_month()
self.max_q = self.calc_max_q()
self.mu = self.max_q / self.a_c
self.deltapsi = self.mu
self.debug_msg("max_q: {}".format(self.max_q), level=1)
self.debug_msg("deltapsi: {}".format(self.deltapsi), level=1)
self.debug_msg("max_posts_per_day: {}".format(self.max_posts_per_day), level=1)
self.debug_msg("a_c: {}".format(self.a_c), level=1)
self.debug_msg("kappa_1: {}".format(self.k1), level=1)
# Creating all necessary folders for storing results, plots and figures
def create_folders(self):
folders = [config.graph_source_dir+"weights/"+self.graph_name+"/",
config.plot_dir + "weights_over_time/" + self.graph_name + "/",
config.plot_dir + "average_weights_over_tau/" + self.graph_name + "/",
config.plot_dir + "ratios_over_time/" + self.graph_name + "/"]
try:
for folder in folders:
if not os.path.exists(folder):
self.debug_msg("Creating folder: {}".format(folder))
os.makedirs(folder)
        except Exception as e:
self.debug_msg("\x1b[41mERROR:: {}\x1b[00m".format(e))
def get_binary_filename(self, source_name, bin_type="GT", run=0):
if bin_type == "GT":
return config.graph_binary_dir+"GT/"+source_name+"/"+source_name+"_run_"+str(run)+".gt"
elif bin_type == "GML":
return config.graph_binary_dir+"GML/"+source_name+"/"+source_name+"_run_"+str(run)+".gml"
# Methods to manage the files to store the weights over all iterations
def open_weights_files(self):
folder = config.graph_source_dir + "weights/" + self.graph_name + "/"
wname = self.graph_name + "_" + str(self.store_iterations) +"_"+\
str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
str(self.run) + "_weights.txt"
#iname = self.graph_name + "_" + str(self.store_iterations) +"_"+\
# str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
# str(self.run) + "_intrinsic.txt"
#ename = self.graph_name + "_" + str(self.store_iterations) +"_"+\
# str(float(self.deltatau)).replace(".", "") + "_" + str(self.ratio).replace(".", "") + "_run_" + \
# str(self.run) + "_extrinsic.txt"
self.weights_file_path = folder+wname
#self.intrinsic_file_path = folder+iname
#self.extrinsic_file_path = folder+ename
self.weights_file = open(self.weights_file_path, "wb")
#self.intrinsic_file = open(self.intrinsic_file_path, "wb")
#self.extrinsic_file = open(self.extrinsic_file_path, "wb")
def write_weights_to_file(self):
self.weights_file.write(("\t").join(["%.8f" % float(x) for x in self.get_node_weights("activity")]) + "\n")
def write_summed_weights_to_file(self):
self.weights_file.write(str(sum(self.get_node_weights("activity"))) + "\n")
#self.intrinsic_file.write("0"+"\n")
#self.extrinsic_file.write("0"+"\n")
def close_weights_files(self):
self.weights_file.close()
#self.intrinsic_file.close()
#self.extrinsic_file.close()
def reduce_to_largest_component(self):
fl = label_largest_component(self.graph)
self.graph = GraphView(self.graph, vfilt=fl)
def set_graph_property(self, type, property, name):
a = self.graph.new_graph_property(type, property)
self.graph.graph_properties[name] = a
# Add calculated graph_properties to graph object
def add_graph_properties(self):
self.set_graph_property("object", self.deltatau, "deltatau")
self.set_graph_property("object", self.deltapsi, "deltapsi")
self.set_graph_property("float", self.cur_iteration, "cur_iteration")
self.set_graph_property("string", self.graph_name, "graph_name")
self.set_graph_property("int", self.store_iterations, "store_iterations")
self.set_graph_property("object", self.top_eigenvalues, "top_eigenvalues")
self.set_graph_property("object", self.ratios, "ratios")
self.set_graph_property("int", self.runs, "runs")
try:
self.set_graph_property("object", self.apm, "activity_per_month")
self.set_graph_property("object", self.dx, "delta_activity_per_month")
self.set_graph_property("object", self.posts, "posts")
self.set_graph_property("object", self.replies, "replies")
self.set_graph_property("float", self.a_c, "a_c")
self.set_graph_property("object", self.a_cs, "a_cs")
self.set_graph_property("object", self.max_q, "max_q")
self.set_graph_property("object", self.max_posts_per_day, "max_posts_per_day")
self.set_graph_property("object", self.g_per_month, "g_per_month")
except:
self.debug_msg(" -> INFO: Could not store empirical activities! ", level=1)
# Reset attributes between iterations / runs
def reset_attributes(self, ratio, temp_weights):
self.graph.vertex_properties["activity"].a = temp_weights
self.ratio = ratio
self.converged = False
self.diverged = False
self.cur_iteration = 0
# node weights getter
def get_node_weights(self, name):
return np.array(self.graph.vp[name].a)
# init empirical weight as average over all nodes
def init_empirical_activity(self):
initial_empirical_activity = self.apm[0]/(self.graph.num_edges()*2)/self.num_users[0]/self.a_c
init_nodes = self.init_users[0]
# reset activity!
for v in self.graph.vertices():
self.graph.vp["activity"][v] = 0.0
# randomly initiate minimal activity
for v_id in init_nodes:
n = self.graph.vertex(v_id)
self.graph.vp["activity"][n] = initial_empirical_activity
# node weights setter
def set_node_weights(self, name, weights):
self.graph.vertex_properties[name].a = weights
def update_node_weights(self, name, added_weight):
self.graph.vertex_properties[name].a += added_weight
def clear_all_filters(self):
self.graph.clear_filters()
self.num_vertices = self.graph.num_vertices()
self.ratio_ones = [1.0] * self.num_vertices
# creating random node weights
def add_node_weights(self, min=0.0, max=0.1, distribution=[1,0,0]):
self.debug_msg("Adding random weights between {} and {} to nodes.".format(min, max), level=0)
num_nodes = int(self.graph.num_vertices())
weights = self.graph.new_vertex_property("double")
weights_list = [random.uniform(min, max) for x in xrange(num_nodes)]
random.shuffle(weights_list)
for ndx, n in enumerate(self.graph.vertices()):
weights[n] = weights_list[ndx]
self.graph.vertex_properties["activity"] = weights
# creating random edge weights
def add_edge_weights(self, min=0.0, max=0.1):
self.debug_msg("Adding random weights between {} and {} to edges.".format(min, max), level=0)
for edge in self.graph.edges():
self.graph.edge_properties['activity'][edge] = random.uniform(min, max)
# eigenvalues getter
def get_eigenvalues(self):
return np.asarray(self.graph.graph_properties['top_eigenvalues'])
# store graph to gt
def store_graph(self, run, postfix=""):
self.debug_msg("Storing Graph")
path = config.graph_binary_dir + "/GT/{}/".format(self.graph_name)
try:
if not os.path.exists(path):
self.debug_msg("Created folder: {}".format(path))
os.makedirs(path)
except Exception as e:
self.debug_msg("\x1b[41mERROR:: {}\x1b[00m".format(e))
self.graph.save(path + "{}_run_{}{}.gt".format(self.graph_name, run, postfix))
# sample calculation of g(x)
def gx(self, q, a, ac):
return (q*a)/math.sqrt(ac**2+a**2)
def fx(self, x, ratio):
return -x*ratio
# plot g(x) function for multiple values
def plot_gx(self, min, max):
x = []
y = []
y2 = []
y3 = []
y4 = []
for weight in np.arange(min, max, 0.01):
y.append(self.gx(1.0, weight, 0.5))
y2.append(self.gx(1.0, weight, 2.0))
y3.append(self.gx(2.0, weight, 0.5))
y4.append(self.gx(2.0, weight, 2.0))
x.append(weight)
plt.figure()
plt.plot(x, y, alpha=1, label="$a_c=0.5$, $q=1.0$")
plt.plot(x, y2, alpha=1, label="$a_c=2.0$, $q=1.0$")
plt.plot(x, y3, alpha=1, label="$a_c=0.5$, $q=2.0$")
plt.plot(x, y4, alpha=1, label="$a_c=2.0$, $q=2.0$")
ax = plt.axes()
plt.xlabel("Node Activity ($a$)")
plt.ylabel("Values for $g(a)$")
plt.plot([-6, 6], [0, 0], 'k-', lw=0.5, alpha=0.8)
plt.plot([0.5, 0.5], [0, 3], 'k--', lw=0.5)
plt.plot([2.0, 2.0], [0, 3], 'k--', lw=0.5)
plt.plot([0.0, 6], [1.0, 1.0], 'k--', lw=0.5)
plt.plot([0.0, 6], [2.0, 2.0], 'k--', lw=0.5)
plt.text(-0.95, 0.95, "$q=1.0$", size=12)
plt.text(-0.95, 1.95, "$q=2.0$", size=12)
plt.text(0.1, -0.2, "$a_c=0.5$", size=12)
plt.text(1.6, -0.2, "$a_c=2.0$", size=12)
plt.plot([0, 0], [-3, 3], 'k-', lw=0.5, alpha=0.8)
plt.title("Values for $g(a)$ with weights from ${}$ to ${}$".format(min, max))
ax.grid(color="gray")
plt.ylim(-3, 3)
plt.legend(loc="upper left")
plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_gx.png")
plt.close("all")
def get_fx_weights(self, min, max, lam):
x = []
y = []
for weight in np.arange(min, max+0.1, 0.1):
y.append(self.fx(weight, lam))
x.append(weight)
return x, y
# plot f(x)
def plot_fx(self, min, max, k=1):
plt.figure()
x,y = self.get_fx_weights(min, max, 1.0)
plt.plot(x, y, alpha=1, label="$\lambda$=$1.0$")
x,y = self.get_fx_weights(min, max, 0.5)
plt.plot(x, y, alpha=1, label="$\lambda$=$0.5$")
x,y = self.get_fx_weights(min, max, 0.1)
plt.plot(x, y, alpha=1, label="$\lambda$=$0.1$")
x,y = self.get_fx_weights(min, max, 1.5)
plt.plot(x, y, alpha=1, label="$\lambda$=$1.5$")
ax = plt.axes()
plt.xlabel("Node Activity (a)")
plt.ylabel("Values for f(a)")
plt.title("Values for f(a) with weights from ${}$ to ${}$".format(min, max))
ax.grid(color="gray")
plt.plot([-1, 1], [0, 0], 'k-', lw=0.5, alpha=0.8)
plt.plot([0, 0], [-1.5, 1.5], 'k-', lw=0.5, alpha=0.8)
plt.legend()
plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_fx.png")
plt.close("all")
# plot f(x)
def plot_fx_weight(self, min, max, k=0.5):
x = []
prev_val = 10
y = [prev_val]
for i in xrange(10):
prev_val *= k
y.append(prev_val)
x.append(i)
x.append(10)
plt.figure()
plt.plot(x, y, alpha=1)
ax = plt.axes()
plt.xlabel("Time $t$")
plt.ylabel("Values for f(a)")
plt.title("Values for f(a) with weight=${}$ and $\lambda$=${}$".format(10, 0.5))
ax.grid(color="gray")
plt.savefig(config.plot_dir + "functions/" + self.graph_name + "_fx_weight.png")
plt.close("all")
# getter for laplacian matrix (not needed)
def get_laplacian(self):
return laplacian(self.graph)
# calculate eigenvector centrality
def calc_ev_centrality(self, max_iter, selector):
try:
return self.graph.vertex_properties[selector]
except:
ev, ev_centrality = eigenvector(self.graph, weight=None, max_iter = max_iter)
return ev_centrality
def calculate_ratios(self):
for i in xrange(len(self.apm)-1):
activity_current = self.apm[i]
activity_next = activity_current-self.dx[i]
self.ratio = self.k1 - math.log(activity_next/activity_current) / self.deltapsi
self.ratio -= 0.03 * activity_current / (self.a_c * self.num_vertices)
self.ratios.append(self.ratio)
self.debug_msg("ratios ({}): {}".format(len(self.ratios), self.ratios), level=1)
def set_ratio(self, index):
self.ratio_index = index
self.ratio = self.ratios[index]
def activity_dynamics(self, store_weights=False, empirical=False):
# Collect required input
activity_weight = np.asarray(self.get_node_weights("activity"))
# Calculate deltax
ratio_ones = (self.ratio * np.asarray(self.ones_ratio))
intrinsic_decay = self.activity_decay(activity_weight, ratio_ones)
extrinsic_influence = self.peer_influence(activity_weight)
activity_delta = (intrinsic_decay + extrinsic_influence) * self.deltatau
t = 1.0
# Check if already converged/diverged
if self.cur_iteration % self.store_iterations == 0:
t = np.dot(activity_delta, activity_delta)
# Debug output & convergence/divergence criteria check
if t < self.converge_at and not empirical:
self.debug_msg(" \x1b[32m>>>\x1b[00m Simulation for \x1b[32m'{}'\x1b[00m with \x1b[34mratio={}\x1b[00m and "
"\x1b[34mdtau={}\x1b[00m \x1b[34mconverged\x1b[00m at \x1b[34m{}\x1b[00m with "
"\x1b[34m{}\x1b[00m".format(self.graph_name, self.ratio, self.deltatau, self.cur_iteration+1,
t), level=1)
self.converged = True
if (t == float("Inf") or t == float("NaN")) and not empirical:
self.debug_msg(" \x1b[31m>>>\x1b[00m Simulation for \x1b[32m'{}'\x1b[00m with \x1b[34mratio={}\x1b[00m and "
"\x1b[34mdtau={}\x1b[00m \x1b[31mdiverged\x1b[00m at \x1b[34m{}\x1b[00m with "
"\x1b[34m{}\x1b[00m".format(self.graph_name, self.ratio, self.deltatau, self.cur_iteration+1,
t), level=1)
self.diverged = True
# Set new weights
self.update_node_weights("activity", activity_delta)
# Store weights to file
if ((store_weights and self.cur_iteration % self.store_iterations == 0) and not empirical) or ((self.converged or self.diverged)
and not empirical):
self.weights_file.write(("\t").join(["%.8f" % x for x in self.get_node_weights("activity")]) + "\n")
#self.intrinsic_file.write(("\t").join(["%.8f" % x for x in intrinsic_decay + activity_weight]) + "\n")
#self.extrinsic_file.write(("\t").join(["%.8f" % x for x in extrinsic_influence + activity_weight]) + "\n")
elif ((store_weights and self.cur_iteration % self.store_iterations == 0) and empirical) or ((self.converged or self.diverged)
and empirical):
self.weights_file.write(str(sum(activity_weight + activity_delta)) + "\n")
#self.intrinsic_file.write(str(abs(sum(intrinsic_decay))*self.deltatau) + "\n")
#self.extrinsic_file.write(str(abs(sum(extrinsic_influence))*self.deltatau) + "\n")
# Increment current iteration counter
self.cur_iteration += 1
def peer_influence(self, x):
pi = ((1.0 * x)/(np.sqrt(1.0+x**2)))
return pi * self.A
def activity_decay(self, x, ratio):
return -x*ratio
def debug_msg(self, msg, level=0):
if self.debug_level <= level:
print " \x1b[35m-NWK-\x1b[00m [\x1b[36m{}\x1b[00m][\x1b[32m{}\x1b[00m] \x1b[33m{}\x1b[00m".format(
datetime.datetime.now().strftime("%H:%M:%S"), self.run, msg)
def update_binary_graph(self, rand_iter, save_specific=True):
# Method needed to avoid race condition!
try:
self.store_graph(rand_iter, save_specific=True)
except Exception as e:
self.debug_msg(e.message, level=0)
self.debug_msg(" ### Sleeping for 100 seconds before trying to store again!", level=0)
sleep(100)
self.update_binary_graph(rand_iter, save_specific)
def debug_gt(self):
gps = self.graph.gp.keys()
vps = self.graph.vp.keys()
eps = self.graph.ep.keys()
self.debug_msg(" >> Inspecting graph properties: {}".format((", ").join(gps)), level=1)
for gp_k in gps:
self.debug_msg(" \x1b[36m- {}:\x1b[00m {}".format(gp_k, self.graph.gp[gp_k]), level=1)
self.debug_msg(" >> Inspecting vertex properties: {}".format((", ").join(vps)), level=1)
for vp_k in vps:
self.debug_msg(" \x1b[32m- {}:\x1b[00m {}".format(vp_k, self.graph.vp[vp_k]), level=1)
self.debug_msg(" >> Inspecting edge properties: {}".format((", ").join(eps)), level=1)
for ep_k in eps:
self.debug_msg(" \x1b[37m- {}:\x1b[00m {}".format(ep_k, self.graph.ep[ep_k]), level=1)
print "Sum Posts: ", sum(self.graph.gp["posts"])
print "Sum Replies: ", sum(self.graph.gp["replies"])
def prepare_eigenvalues(self):
self.top_eigenvalues = self.get_eigenvalues()
self.k1 = max(self.top_eigenvalues)
def load_graph_save(self, fpath):
try:
self.load_graph(fpath)
except Exception as e:
self.debug_msg(e.message, level=0)
self.debug_msg(" ### Sleeping for 100 seconds before trying to store again!", level=0)
sleep(100)
self.load_graph(fpath)
def load_graph(self, fpath):
self.debug_msg("Loading GT", level=0)
self.graph = load_graph(fpath)
remove_self_loops(self.graph)
remove_parallel_edges(self.graph)
self.debug_msg(" --> Creating ones vector", level=0)
self.ones_ratio = [1.0] * self.graph.num_vertices()
self.debug_msg(" --> Getting Adjacency Matrix", level=0)
self.A = adjacency(self.graph, weight=None)
self.num_vertices = self.graph.num_vertices()
self.num_edges = self.graph.num_edges()
self.debug_msg(" --> Counted {} vertices".format(self.num_vertices), level=0)
self.debug_msg(" --> Counted {} edges".format(self.num_edges), level=0) | gpl-2.0 |
exowanderer/Charge-Carrier-Trapping-Comparison | Charge Carrier Trapping Experiment.py | 1 | 37127 |
# coding: utf-8
# # Charge Carrier Trapping Experiment
#
# CCT = Charge Carrier Trapping - This notebook compares the Zhou et al. 2017 results with a data-driven analysis using MultiNest
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
from pandas import read_csv, DataFrame, concat
from time import time
from exoparams import PlanetParams
from astropy import units as u
# In[ ]:
test_data = read_csv('test_data.dat')
test_data
# **Check if the data makes sense as is**
# In[ ]:
fig = figure(figsize=(10,10))
errorbar(test_data['DeltaPhase'], test_data['Flux'] , test_data['Sigma'], fmt='o')
fig = figure(figsize=(10,10))
errorbar(np.arange(test_data['DeltaPhase'].size), test_data['Flux'] , test_data['Sigma'], fmt='o')
# errorbar(h38_v2_orbitPhased['DeltaPhase'], h38_v2_orbitPhased['Flux'] , h38_v2_orbitPhased['Sigma'], fmt='o')
# errorbar(w67_v1_orbitPhased['DeltaPhase'], w67_v1_orbitPhased['Flux'] , w67_v1_orbitPhased['Sigma'], fmt='o')
# In[ ]:
for k in test_data['OrbitNumber'].unique():
orbitNNow = test_data['OrbitNumber'] == k
errorbar(test_data['DeltaPhase'][orbitNNow] , test_data['Flux'][orbitNNow] , test_data['Sigma'][orbitNNow], fmt='o')
# In[ ]:
def zhou_model(params):
# Zhou et al. 2017
    # The exponential ramp timescale is detector dependent, and therefore uniform across all observations
    # But the difference from orbit to orbit is predicted to be related
    # ONLY to the initial number of charge traps populated at the start of each ramp
# BIG ASSUMPTION
flux = ydata.copy() # I assume that what Zhou means by `flux` is either the WLC or avg WLC value
# flux = ydata.copy() / 128 # I assume that what Zhou means by `flux` is either the WLC or avg WLC value
E0fast = params[0] # Orbit 0; Start with per frame; may want per pixel
E0slow = params[1] # Orbit 0; Start with per frame; may want per pixel
# Separate out the delta-E0 components per orbit
# Keep dE0fast[0] and dE0slow[0] == 0.0 because they correspond to E0fast and E0slow (initial)
dE0fast = np.zeros(nOrbits)
dE0slow = np.zeros(nOrbits)
for k in range(1, nOrbits):
print(k,2*k, 2*k+1,len(params))
dE0fast[k] = params[2*k]
dE0slow[k] = params[2*k+1]
# From Table 3 of Zhou et al. 2017
ETotFast = 270.6
etaFast = 0.006863
tauFast = 224.8
ETotSlow = 1320.0
etaSlow = 0.01311
tauSlow = 2.45e4
coeffFast0= (etaFast * flux / ETotFast + tauFast**-1)
coeffSlow0= (etaSlow * flux / ETotSlow + tauSlow**-1)
coeffFast1= etaFast*flux / coeffFast0
coeffSlow1= etaSlow*flux / coeffSlow0
Efast = zeros(orbit_phase.shape)
Eslow = zeros(orbit_phase.shape)
for k in range(nOrbits):
orbitNow = where(orbitNumber == k)[0]
Efast[orbitNow] = coeffFast1 + (E0fast + dE0fast[k] - coeffFast1)*exp(-coeffFast0 * tphase[orbitNow])
Eslow[orbitNow] = coeffSlow1 + (E0slow + dE0slow[k] - coeffSlow1)*exp(-coeffSlow0 * tphase[orbitNow])
dEFastDtP = etaFast * flux * (ETotFast - Efast) / ETotFast
dEFastDtN = -Efast / tauFast
dESlowDtP = etaSlow * flux * (ETotSlow - Eslow) / ETotSlow
dESlowDtN = -Eslow / tauSlow
    # NOTE: the original cell left this expression unassigned and repeated the
    # trapping (positive) terms; the presumed intent -- flagged here as an
    # assumption -- is to combine the trapping and release rates into a ramp
    # profile and return it.
    return lambda phase: 1 - dEFastDtP - dESlowDtP - dEFastDtN - dESlowDtN
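# Minimal usage sketch (an assumption, not part of the original notebook): with
# the globals referenced above defined (ydata, nOrbits, orbit_phase,
# orbitNumber, tphase), the ramp profile could be evaluated and divided out of
# the light curve roughly as follows; the parameter values are placeholders.
#
#   example_params = [50.0, 500.0] + [0.0, 0.0] * (nOrbits - 1)
#   ramp_profile = zhou_model(example_params)(orbit_phase)
#   detrended_flux = ydata / ramp_profile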
# # PyMultiNest Demo
# In[ ]:
from __future__ import absolute_import, unicode_literals, print_function
import pymultinest
import math
import os
import threading, subprocess
from sys import platform
if not os.path.exists("chains"): os.mkdir("chains")
# In[ ]:
get_ipython().magic('matplotlib inline')
from pylab import *;ion()
# from pymultinest.solve import Solver,solve
from numpy import pi, sin, cos, linspace
def single_exponential_model(cube):
alpha = cube[0]
beta = cube[1]
gamma = cube[2]
return lambda xdata: alpha - beta*exp(-gamma*xdata)
def double_exponential_model(cube):
alpha = cube[0]
beta = cube[1]
gamma = cube[2]
delta = cube[3]
epsilon = cube[4]
return lambda xdata: alpha - beta*exp(-gamma*xdata) - delta*exp(-epsilon*xdata)
def straight_line(cube):
offset = cube[0]
slope = cube[1]
return lambda abscissa: offset + slope * abscissa
def sine_wave(cube):
amp = cube[0]
period = cube[1]
return lambda abscissa: amp*sin(2*pi / period * abscissa)
# ** Generate Fake Data for Algorithm Testing **
# In[ ]:
np.random.seed(0)
param0_test= 1#0.05
param1_test= .1#5*pi
param2_test= 10.0
yunc_test = 0.01
nPts_test = int(50)
nThPts_test= int(1e3)
xmin_test = -0.0#*pi
xmax_test = 1.0#*pi
dx_test = 0.01*(xmax_test - xmin_test)
model_test = single_exponential_model
# model_test = sine_wave
# model_test = straight_line
yuncs_test = np.random.normal(yunc_test, 1e-2 * yunc_test, nPts_test)
thdata_test= np.linspace(xmin_test-dx_test, xmax_test+dx_test, nThPts_test)
xdata_test = np.random.uniform(xmin_test, xmax_test, nPts_test)
xdata_test = sort(xdata_test)
ydata_test = model_test([param0_test,param1_test,param2_test])(xdata_test)
yerr_test = np.random.normal(0, yuncs_test, nPts_test)
zdata_test = ydata_test + yerr_test
figure(figsize=(10,10))
plot(thdata_test, model_test([param0_test,param1_test,param2_test])(thdata_test))
errorbar(xdata_test, zdata_test, yunc_test*ones(zdata_test.size), fmt='o')
# # Single Exponential Model
# In[ ]:
nThPts = int(1e3)
model_SEM = single_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_SEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_SEM_init= 1.0 # by defintion
param1_SEM_init= (ydata.max() - ydata.min())#/100
param2_SEM_init= round(5/(xdata.max() - xdata.min()))
print(param0_SEM_init, param1_SEM_init, param2_SEM_init)
figure(figsize=(10,10))
plot(thdata_SEM, model_SEM([param0_SEM_init,param1_SEM_init,param2_SEM_init])(thdata_SEM))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
model_SEM = single_exponential_model
parameters_SEM = ['max', 'amp1', 'scale1']
def myprior_SEM(cube, ndim, nparams):
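    # PyMultiNest passes each parameter in as a unit-cube sample (cube[i]
    # uniform in [0, 1]); this prior transform maps those samples in place
    # onto the physical prior ranges used by the model.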
cube0_width = 1e-3
cube[0] = cube[0] * cube0_width + (1 - 0.5*cube0_width)# - 10# U(0,2)
cube[1] = cube[1] # - 10# U(0,1) -- default
cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000)
def myloglike_SEM(cube, ndim, nparams):
chi = 1.
# print "cube", [cube[i] for i in range(ndim)], cube
# for i in range(ndim):
# chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2#math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.)
# print "returning", math.pow(2. + chi, 5)
modelNow = model_SEM(cube)(xdata)
return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_SEM = len(parameters_SEM)
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'single_exponential_model'
outputfiles_basename = 'chains/' + planetName + '-' + visitName + '-' + modelName + '-'
start = time()
plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_SEM, myprior_SEM, n_params_SEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename);
# ok, done. Stop our progress watcher
progress.stop();
print('SEM took', time() - start, 'seconds')
# lets analyse the results
a_SEM = pymultinest.Analyzer(n_params = n_params_SEM, outputfiles_basename=outputfiles_basename);
s_SEM = a_SEM.get_stats();
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f:
json.dump(parameters_SEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_SEM.outputfiles_basename, mode='w') as f:
json.dump(s_SEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
print("Global Evidence:\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_SEM = pymultinest.PlotMarginalModes(a_SEM)
plt.figure(figsize=(5*n_params_SEM, 5*n_params_SEM))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params_SEM):
plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * i + i + 1)
p_SEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters_SEM[i])
for j in range(i):
plt.subplot(n_params_SEM, n_params_SEM, n_params_SEM * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p_SEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
plt.xlabel(parameters_SEM[i])
plt.ylabel(parameters_SEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params_SEM):
# print(5*n_params, 1, i+1)
plt.subplot(5*n_params_SEM, 1, i+1)
p_SEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters_SEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p_SEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters_SEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# In[ ]:
p_SEM.analyser.get_best_fit()['parameters'], [param0_SEM_init, param1_SEM_init, param2_SEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_SEM, model_SEM([param0_SEM_init,param1_SEM_init, param2_SEM_init])(thdata_SEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_SEM.analyser.get_stats()
# # Unrestricted Double Exponential Model
# In[ ]:
nThPts= int(1e3)
model_UDEM = double_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_UDEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_UDEM_init = 1.0 # by defintion
param1_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param2_UDEM_init = round(5/(xdata.max() - xdata.min()))
param3_UDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param4_UDEM_init = round(5/(xdata.max() - xdata.min()))
print(param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init)
figure(figsize=(10,10))
plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init,param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
model_UDEM = double_exponential_model
parameters_UDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2']
# def myprior_RDEM(cube, ndim, nparams):
# cube[0] = cube[0] * 1e-3 + (1 - 1e-3/2)# - 10# U(0,2)
# cube[1] = -cube[1] * 5e-3 + 5e-4 # - 10# U(0,1) -- default
# cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000)
# cube[3] = cube[3] * 5e-3 + 5e-4# - 10# U(0,1) -- default
# cube[4] = cube[4] * 1e4 - 5e3# - 1000 # U(0,2000)
def myprior_UDEM(cube, ndim, nparams):
cube[0] = cube[0] * 1e-2 + (1 - 1e-2/2)# - 10# U(0,2)
cube[1] = cube[1] * 2 - 2/2 # - 10# U(0,1) -- default
cube[2] = cube[2] * 1e4 - 1e4/2# - 1000 # U(0,2000)
cube[3] = cube[3] * 2 - 2/2# - 10# U(0,1) -- default
cube[4] = cube[4] * 1e4 - 1e4/2# - 1000 # U(0,2000)
def myloglike_UDEM(cube, ndim, nparams):
modelNow = model_UDEM(cube)(xdata)
return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_UDEM = len(parameters_UDEM)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'unrestricted_double_exponential_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename)
progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_UDEM, myprior_UDEM, n_params_UDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_UDEM = pymultinest.Analyzer(n_params = n_params_UDEM, outputfiles_basename=outputfiles_basename)
s_UDEM = a_UDEM.get_stats()
print('UDEM took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f:
json.dump(parameters_UDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f:
json.dump(s_UDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_UDEM = pymultinest.PlotMarginalModes(a_UDEM)
plt.figure(figsize=(5*n_params_UDEM, 5*n_params_UDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params_UDEM):
plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * i + i + 1)
p_UDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters_UDEM[i])
for j in range(i):
plt.subplot(n_params_UDEM, n_params_UDEM, n_params_UDEM * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
# p_UDEM.plot_conditional(i, j, with_ellipses=False, with_points=False, grid_points=30)
p_UDEM.plot_conditional(i, j, with_ellipses=False , with_points=False , grid_points=30, only_interpolate=False, use_log_values=False, marginalization_type='sum')
plt.xlabel(parameters_UDEM[i])
plt.ylabel(parameters_UDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
axes_colors = rcParams['axes.prop_cycle'].by_key()['color']
nColors = len(axes_colors)
# In[ ]:
minLogE, maxLogE = min(a_UDEM.get_equal_weighted_posterior().T[-1]), max(a_UDEM.get_equal_weighted_posterior().T[-1])
rangeLogE = maxLogE - minLogE
minLogE, maxLogE, rangeLogE, nColors
# In[ ]:
from astroML.plotting import hist
from statsmodels.robust import scale
hist(a_UDEM.get_equal_weighted_posterior().T[-1], bins='blocks')
# In[ ]:
nSig = 10
mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1])
med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1])
madBins = [med_logE - nSig*mad_logE for nSig in range(nColors)]
# In[ ]:
fig = figure(figsize=(15,15));
logEchain = a_UDEM.get_equal_weighted_posterior().T[-1]
mad_logE = scale.mad(a_UDEM.get_equal_weighted_posterior().T[-1])
med_logE = median(a_UDEM.get_equal_weighted_posterior().T[-1])
madBins = [med_logE - nSig*mad_logE for nSig in range(nColors+1)]
for k in range(5):
ax = fig.add_subplot(5,1,k+1);
    for nSig in range(nColors):
        # Presumed intent of the original (broken) condition: select the samples
        # whose log-evidence falls in this MAD bin; madBins is decreasing, so
        # the bin is (madBins[nSig+1], madBins[nSig]].
        inBin = (logEchain <= madBins[nSig]) & (logEchain > madBins[nSig+1])
        ax.plot(a_UDEM.get_equal_weighted_posterior().T[k][inBin], logEchain[inBin], 'o', color=axes_colors[nSig], alpha=0.1);
fig.canvas.draw();
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params_UDEM):
# print(5*n_params, 1, i+1)
plt.subplot(5*n_params_UDEM, 1, i+1)
p_UDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters_UDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p_UDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters_UDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# In[ ]:
p_UDEM.analyser.get_best_fit()['parameters'], [param0_UDEM_init, param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_UDEM, model_UDEM([param0_UDEM_init,param1_UDEM_init, param2_UDEM_init, param3_UDEM_init, param4_UDEM_init])(thdata_UDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model')
legend(loc=0)
# In[ ]:
p_UDEM.analyser.get_stats()
# # Restricted Double Exponential Model
# In[ ]:
nThPts= int(1e3)
model_RDEM = double_exponential_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_RDEM = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_RDEM_init = 1.0 # by defintion
param1_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param2_RDEM_init = round(5/(xdata.max() - xdata.min()))
param3_RDEM_init = 0.5*(ydata.max() - ydata.min())#/100
param4_RDEM_init = round(5/(xdata.max() - xdata.min()))
print(param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init)
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init,param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM))
errorbar(xdata, ydata, yuncs, fmt='o')
def show(filepath):
""" open the output (pdf) file for the user """
if os.name == 'mac' or platform == 'darwin': subprocess.call(('open', filepath))
elif os.name == 'nt' or platform == 'win32': os.startfile(filepath)
elif platform.startswith('linux') : subprocess.call(('xdg-open', filepath))
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
model_RDEM = double_exponential_model
parameters_RDEM = ['max', 'amp1', 'scale1', 'amp2', 'scale2']
def myprior_RDEM(cube, ndim, nparams):
cube[0] = cube[0] * 1e-3 + (1 - 1e-3/2)# - 10# U(0,2)
cube[1] = -cube[1] * 5e-3 + 5e-4 # - 10# U(0,1) -- default
cube[2] = cube[2] * 1e4 - 5e3# - 1000 # U(0,2000)
cube[3] = cube[3] * 5e-3 + 5e-4# - 10# U(0,1) -- default
cube[4] = cube[4] * 1e4 - 5e3# - 1000 # U(0,2000)
def myloglike_RDEM(cube, ndim, nparams):
chi = 1.
# print "cube", [cube[i] for i in range(ndim)], cube
# for i in range(ndim):
# chi *= -0.5 * ((cube[i] - 0.2) / 0.1)**2#math.cos(cube[i] / 2.) * math.sin(cube[i] / 2.)
# print "returning", math.pow(2. + chi, 5)
modelNow = model_RDEM(cube)(xdata)
return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_RDEM = len(parameters_RDEM)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'restricted_double_exponential_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename); progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_RDEM, myprior_RDEM, n_params_RDEM, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_RDEM = pymultinest.Analyzer(n_params = n_params_RDEM, outputfiles_basename=outputfiles_basename)
s_RDEM = a_RDEM.get_stats()
print('RDEM took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
json.dump(s_RDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_RDEM = pymultinest.PlotMarginalModes(a_RDEM)
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params_RDEM):
plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1)
p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters_RDEM[i])
for j in range(i):
plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
plt.xlabel(parameters_RDEM[i])
plt.ylabel(parameters_RDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params_RDEM):
# print(5*n_params, 1, i+1)
plt.subplot(5*n_params_RDEM, 1, i+1)
p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters_RDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters_RDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# In[ ]:
p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_RDEM.analyser.get_stats()
# # Compare Unrestricted Double, Restricted Double, and Single Exponential
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_SEM.outputfiles_basename, 'w') as f:
json.dump(parameters_SEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_SEM.outputfiles_basename, mode='w') as f:
json.dump(s_SEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("SEM Global Evidence:\t\t%.3f +- %.3f" % ( s_SEM['nested sampling global log-evidence'], s_SEM['nested sampling global log-evidence error'] ))
# store name of parameters, always useful
with open('%sparams.json' % a_UDEM.outputfiles_basename, 'w') as f:
json.dump(parameters_UDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_UDEM.outputfiles_basename, mode='w') as f:
json.dump(s_UDEM, f, indent=2)
# print()
# print("-" * 30, 'ANALYSIS', "-" * 30)
print("UDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_UDEM['nested sampling global log-evidence'], s_UDEM['nested sampling global log-evidence error'] ))
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
json.dump(s_RDEM, f, indent=2)
# print()
# print("-" * 30, 'ANALYSIS', "-" * 30)
print("RDEM Global Evidence:\t\t%.3f +- %.3f" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
figure(figsize=(10,10))
plot(thdata_SEM, model_SEM([param0_SEM_init, param1_SEM_init, param2_SEM_init])(thdata_SEM), '.', label='Initial SEM Model')
plot(thdata_RDEM, model_RDEM([param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), '--', label='Initial RDEM Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_SEM, model_SEM(p_SEM.analyser.get_best_fit()['parameters'])(thdata_SEM), label='PMN SEM Model')
plot(thdata_UDEM, model_UDEM(p_UDEM.analyser.get_best_fit()['parameters'])(thdata_UDEM), label='PMN UDEM Model')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN RDEM Model')
legend(loc=0)
# # Polynomials
# In[ ]:
figure(figsize=(20,20))
from numpy.polynomial import polynomial
def time_polynomial(params):
# modelOut = np.zeros(tdata.size)
# for kc, coeff in enumerate(params):
# modelOut += coeff * tdata**kc
if len(params):
return lambda tdata: polynomial.polyval(tdata, params)
else:
return lambda tdata: zeros(tdata.size)
def orbital_polynomial(params):
# modelOut = np.zeros(xdata.size)
# for kc, coeff in enumerate(params):
# modelOut += coeff * xdata**kc
# return modelOut
if len(params):
return lambda odata: polynomial.polyval(odata, params)
else:
return lambda odata: zeros(odata.size)
def wavelength_polynomial(params):
# modelOut = np.zeros(ldata.size)
# for kc, coeff in enumerate(params):
# modelOut += coeff * ldata**kc
# return modelOut
if len(params):
return lambda ldata: polynomial.polyval(ldata, params)
else:
return lambda ldata: zeros(ldata.size)
def polynomial_model(params):
params_list= list(params.copy())[::-1]
timeParams = array([params_list.pop() for _ in range(nTimeCoeffs)])
orbitParams = array([params_list.pop() for _ in range(nOrbitCoeffs)])
waveParams = array([params_list.pop() for _ in range(nWaveCoeffs)])
return lambda tdata, odata, ldata: time_polynomial(timeParams)(tdata) + orbital_polynomial(orbitParams)(odata) + wavelength_polynomial(waveParams)(ldata)
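
# Illustrative sketch (not part of the original analysis): how polynomial_model
# splits a flat parameter vector into per-dimension coefficient blocks. The
# coefficient counts and demo values below are assumptions chosen only for this
# demonstration, and it relies on the notebook's earlier pylab-style imports
# (array, print).
nTimeCoeffs, nOrbitCoeffs, nWaveCoeffs = 2, 1, 0
demo_params = array([1.0, 0.5, 2.0])        # [time0, time1, orbit0]
demo_model = polynomial_model(demo_params)  # -> f(tdata, odata, ldata)
print(demo_model(array([0.0, 1.0]), array([0.0, 1.0]), array([0.0, 1.0])))
# expected: [3.0, 3.5] -- time part (1 + 0.5*t) plus constant orbit part (2)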
tdata, xdata, ldata = np.random.uniform(-10,10,(3,100))
tdata.sort()
xdata.sort()
ldata.sort()
# tdata, xdata, ldata = [np.linspace(-10,10,100) for _ in range(3)]
for nTimeCoeffs in range(4):
for nOrbitCoeffs in range(4):
for nWaveCoeffs in range(4):
params = np.random.uniform(-20,20,nTimeCoeffs+nOrbitCoeffs+nWaveCoeffs)
plot(tdata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
plot(xdata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
plot(ldata, polynomial_model(params)(tdata,xdata,ldata),'.', alpha=0.5, mew=0)
# In[ ]:
nThPts= int(1e3)
model_Poly = polynomial_model
xdata = test_data['DeltaPhase']
ydata = test_data['Flux']
yuncs = test_data['Sigma']
nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0
h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']
xmin, xmax = xdata.min(), xdata.max()
dx = (xmax - xmin)/100
thdata_Poly = np.linspace(xmin-dx, xmax+dx, nThPts)
param0_Poly_init = 1.0 # by definition
param1_Poly_init = 1.0
param2_Poly_init = 1.0
param3_Poly_init = 1.0
param4_Poly_init = 1.0
print(param0_Poly_init, param1_Poly_init, param2_Poly_init, param3_Poly_init, param4_Poly_init)
figure(figsize=(10,10))
plot(thdata_Poly, model_Poly([param0_Poly_init,param1_Poly_init,param2_Poly_init, param3_Poly_init, param4_Poly_init])(thdata_Poly))
errorbar(xdata, ydata, yuncs, fmt='o')
# In[ ]:
# our probability functions
# Taken from the eggbox problem.
# model = sine_wave
# parameters = ["amp", "period"]
# model = straight_line
# parameters = ["offset", "slope"]
nTimeCoeffs = 2
nOrbitCoeffs = 3
nWaveCoeffs = 0
h38PlanetPhase = test_data_input_input['Phase']
h38HSTPhase = test_data['DeltaPhase']
model_Poly = polynomial_model
parameters_Poly = ['timeIntercept', 'timeSlope', 'orbitIntercept', 'orbitSlope', 'orbitQuadratic']
cubeKWidth = 1e3
def myprior_Poly(cube, ndim, nparams):
    # Map each unit-cube coordinate onto U(-cubeKWidth/2, +cubeKWidth/2).
    for k in range(ndim):
        cube[k] = cube[k] * cubeKWidth - 0.5 * cubeKWidth
def myloglike_Poly(cube, ndim, nparams):
    # Gaussian log-likelihood: -0.5 * sum((model - data)^2 / sigma^2)
    modelNow = model_Poly(cube)(times, HSTPhase, 0)
    return -0.5*((modelNow - ydata)**2. / yuncs**2.).sum()
# In[ ]:
if not os.path.exists("chains"): os.mkdir("chains")
start = time()
# number of dimensions our problem has
# parameters = ["x", "y"]
n_params_Poly = len(parameters_Poly)
savedir = 'chains'
planetName = 'HAT38'
visitName = 'visit1'
modelName = 'polynomial_model'
outputfiles_basename = savedir + '/' + planetName + '-' + visitName + '-' + modelName + '-'
plt.figure(figsize=(5*n_params_Poly, 5*n_params_Poly))
# we want to see some output while it is running
progress = pymultinest.ProgressPlotter(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename)
progress.start()
# threading.Timer(2, show, ["chains/2-phys_live.points.pdf"]).start() # delayed opening
# run MultiNest
pymultinest.run(myloglike_Poly, myprior_Poly, n_params_Poly, importance_nested_sampling = False, resume = False, verbose = True, sampling_efficiency = 'model', n_live_points = 1000, outputfiles_basename=outputfiles_basename)
# ok, done. Stop our progress watcher
progress.stop()
# lets analyse the results
a_Poly = pymultinest.Analyzer(n_params = n_params_Poly, outputfiles_basename=outputfiles_basename)
s_Poly = a_Poly.get_stats()
print('Polynomial took', time() - start, 'seconds')
# fig = plt.gcf()
# axs = fig.get_axes()
# for ax in axs:
# ax.set_ylim()
# # ax.set_xscale("log", nonposx='clip')
# # ax.set_yscale("log", nonposy='clip')
# In[ ]:
import json
# store name of parameters, always useful
with open('%sparams.json' % a_RDEM.outputfiles_basename, 'w') as f:
json.dump(parameters_RDEM, f, indent=2)
# store derived stats
with open('%sstats.json' % a_RDEM.outputfiles_basename, mode='w') as f:
json.dump(s_RDEM, f, indent=2)
print()
print("-" * 30, 'ANALYSIS', "-" * 30)
print("Global Evidence:\t%.15e +- %.15e" % ( s_RDEM['nested sampling global log-evidence'], s_RDEM['nested sampling global log-evidence error'] ))
# In[ ]:
import matplotlib.pyplot as plt
plt.clf()
# Here we will plot all the marginals and whatnot, just to show off
# You may configure the format of the output here, or in matplotlibrc
# All pymultinest does is filling in the data of the plot.
# Copy and edit this file, and play with it.
p_RDEM = pymultinest.PlotMarginalModes(a_RDEM)
plt.figure(figsize=(5*n_params_RDEM, 5*n_params_RDEM))
#plt.subplots_adjust(wspace=0, hspace=0)
for i in range(n_params_RDEM):
plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * i + i + 1)
p_RDEM.plot_marginal(i, with_ellipses = False, with_points = False, grid_points=50)
plt.ylabel("Probability")
plt.xlabel(parameters_RDEM[i])
for j in range(i):
plt.subplot(n_params_RDEM, n_params_RDEM, n_params_RDEM * j + i + 1)
#plt.subplots_adjust(left=0, bottom=0, right=0, top=0, wspace=0, hspace=0)
p_RDEM.plot_conditional(i, j, with_ellipses = False, with_points = False, grid_points=30)
plt.xlabel(parameters_RDEM[i])
plt.ylabel(parameters_RDEM[j])
# plt.savefig("chains/marginals_multinest.pdf") #, bbox_inches='tight')
# show("chains/marginals_multinest.pdf")
# In[ ]:
# plt.figure(figsize=(5*n_params, 5*n_params))
# plt.subplot2grid((5*n_params, 5*n_params), loc=(0,0))
for i in range(n_params_RDEM):
# print(5*n_params, 1, i+1)
plt.subplot(5*n_params_RDEM, 1, i+1)
p_RDEM.plot_modes_marginal(i, with_ellipses = True, with_points = False)
plt.ylabel("Probability")
plt.xlabel(parameters_RDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# outfile = '%s-mode-marginal-cumulative-%d.pdf' % (a.outputfiles_basename,i)
p_RDEM.plot_modes_marginal(i, cumulative = True, with_ellipses = True, with_points = False)
plt.ylabel("Cumulative probability")
plt.xlabel(parameters_RDEM[i])
# plt.savefig(outfile, format='pdf', bbox_inches='tight')
# plt.close()
# In[ ]:
p_RDEM.analyser.get_best_fit()['parameters'], [param0_RDEM_init, param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init]
# In[ ]:
figure(figsize=(10,10))
plot(thdata_RDEM, model_RDEM([param0_RDEM_init,param1_RDEM_init, param2_RDEM_init, param3_RDEM_init, param4_RDEM_init])(thdata_RDEM), label='Initial Model')
errorbar(xdata, ydata, yuncs, fmt='o', label='Data')
plot(thdata_RDEM, model_RDEM(p_RDEM.analyser.get_best_fit()['parameters'])(thdata_RDEM), label='PMN Model')
legend(loc=0)
# In[ ]:
p_RDEM.analyser.get_stats()
| gpl-3.0 |
sylvan5/PRML | ch5/digits.py | 2 | 2415 | #coding:utf-8
import numpy as np
from mlp import MultiLayerPerceptron
from sklearn.datasets import load_digits
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import LabelBinarizer
from sklearn.metrics import confusion_matrix, classification_report
"""
Recognition of the small hand-written digits dataset
Requires scikit-learn to be installed
http://scikit-learn.org/
"""
if __name__ == "__main__":
    # Load scikit-learn's small digits dataset
    # 1797 samples, 8x8 pixels
digits = load_digits()
    # Build the training data
X = digits.data
y = digits.target
    # Normalize the pixel values to 0.0-1.0
X /= X.max()
    # Multi-layer perceptron
mlp = MultiLayerPerceptron(64, 100, 10, act1="tanh", act2="sigmoid")
    # Split into training data (90%) and test data (10%)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
    # Convert the target digits to a 1-of-K representation
# 0 => [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
# 1 => [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
# ...
# 9 => [0, 0, 0, 0, 0, 0, 0, 0, 0, 1]
labels_train = LabelBinarizer().fit_transform(y_train)
labels_test = LabelBinarizer().fit_transform(y_test)
    # Train the neural network weights on the training data
mlp.fit(X_train, labels_train, learning_rate=0.01, epochs=10000)
    # Compute the prediction accuracy on the test data
predictions = []
for i in range(X_test.shape[0]):
o = mlp.predict(X_test[i])
        # Classify as the class with the largest output
predictions.append(np.argmax(o))
print confusion_matrix(y_test, predictions)
print classification_report(y_test, predictions)
    # Plot only the misclassified samples
    # Count the misclassifications and collect the indices of the misclassified test data
cnt = 0
error_idx = []
for idx in range(len(y_test)):
if y_test[idx] != predictions[idx]:
print "error: %d : %d => %d" % (idx, y_test[idx], predictions[idx])
error_idx.append(idx)
cnt += 1
    # Plot
import pylab
for i, idx in enumerate(error_idx):
pylab.subplot(cnt/5 + 1, 5, i + 1)
pylab.axis('off')
pylab.imshow(X_test[idx].reshape((8, 8)), cmap=pylab.cm.gray_r)
pylab.title('%d : %i => %i' % (idx, y_test[idx], predictions[idx]))
pylab.show()
| mit |
kkoksvik/FreeCAD | src/Mod/Ship/shipCapacityCurve/PlotAux.py | 8 | 5494 | #***************************************************************************
#* *
#* Copyright (c) 2011, 2016 *
#* Jose Luis Cercos Pita <jlcercos@gmail.com> *
#* *
#* This program is free software; you can redistribute it and/or modify *
#* it under the terms of the GNU Lesser General Public License (LGPL) *
#* as published by the Free Software Foundation; either version 2 of *
#* the License, or (at your option) any later version. *
#* for detail see the LICENCE text file. *
#* *
#* This program is distributed in the hope that it will be useful, *
#* but WITHOUT ANY WARRANTY; without even the implied warranty of *
#* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
#* GNU Library General Public License for more details. *
#* *
#* You should have received a copy of the GNU Library General Public *
#* License along with this program; if not, write to the Free Software *
#* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 *
#* USA *
#* *
#***************************************************************************
import os
from PySide import QtGui, QtCore
import FreeCAD
import FreeCADGui
from FreeCAD import Base
import Spreadsheet
import matplotlib.ticker as mtick
class Plot(object):
def __init__(self, l, z, v, tank):
""" Constructor. performs the plot and shows it.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
"""
self.plot(l, z, v, tank)
self.spreadSheet(l, z, v, tank)
def plot(self, l, z, v, tank):
""" Perform the areas curve plot.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
@return True if error happens.
"""
try:
import Plot
plt = Plot.figure('Capacity curve')
except ImportError:
msg = QtGui.QApplication.translate(
"ship_console",
"Plot module is disabled, so I cannot perform the plot",
None,
QtGui.QApplication.UnicodeUTF8)
FreeCAD.Console.PrintWarning(msg + '\n')
return True
# Plot the volume as a function of the level percentage
vols = Plot.plot(l, v, 'Capacity')
vols.line.set_linestyle('-')
vols.line.set_linewidth(2.0)
vols.line.set_color((0.0, 0.0, 0.0))
Plot.xlabel(r'$\mathrm{level}$')
Plot.ylabel(r'$V \; [\mathrm{m}^3]$')
plt.axes.xaxis.label.set_fontsize(20)
plt.axes.yaxis.label.set_fontsize(20)
Plot.grid(True)
# Special percentage formatter for the x axis
fmt = '%.0f%%'
xticks = mtick.FormatStrFormatter(fmt)
plt.axes.xaxis.set_major_formatter(xticks)
# Now duplicate the axes
ax = Plot.addNewAxes()
# Y axis can be placed at right
ax.yaxis.tick_right()
ax.spines['right'].set_color((0.0, 0.0, 0.0))
ax.spines['left'].set_color('none')
ax.yaxis.set_ticks_position('right')
ax.yaxis.set_label_position('right')
# And X axis can be placed at top
ax.xaxis.tick_top()
ax.spines['top'].set_color((0.0, 0.0, 1.0))
ax.spines['bottom'].set_color('none')
ax.xaxis.set_ticks_position('top')
ax.xaxis.set_label_position('top')
# Plot the volume as a function of the level z coordinate
vols = Plot.plot(z, v, 'level')
vols.line.set_linestyle('-')
vols.line.set_linewidth(2.0)
vols.line.set_color((0.0, 0.0, 1.0))
Plot.xlabel(r'$z \; [\mathrm{m}]$')
Plot.ylabel(r'$V \; [\mathrm{m}^3]$')
ax.xaxis.label.set_fontsize(20)
ax.yaxis.label.set_fontsize(20)
ax.xaxis.label.set_color((0.0, 0.0, 1.0))
ax.tick_params(axis='x', colors=(0.0, 0.0, 1.0))
Plot.grid(True)
# End
plt.update()
return False
def spreadSheet(self, l, z, v, tank):
""" Write the output data file.
@param l Percentages of filling level.
@param z Level z coordinates.
@param v Volume of fluid.
@param tank Active tank instance.
"""
s = FreeCAD.activeDocument().addObject('Spreadsheet::Sheet',
'Capacity curve')
# Print the header
s.set("A1", "Percentage of filling level")
s.set("B1", "Level [m]")
s.set("C1", "Volume [m^3]")
# Print the data
for i in range(len(l)):
s.set("A{}".format(i + 2), str(l[i]))
s.set("B{}".format(i + 2), str(z[i]))
s.set("C{}".format(i + 2), str(v[i]))
# Recompute
FreeCAD.activeDocument().recompute() | lgpl-2.1 |
bsipocz/astroML | doc/conf.py | 2 | 7960 | # -*- coding: utf-8 -*-
#
# astroML documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 6 15:37:12 2011.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import matplotlib.sphinxext.plot_directive
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
#sys.path.insert(0, os.path.abspath('../'))
# override default gen_rst
sys.path.insert(0, os.path.abspath('sphinxext'))
try:
import gen_rst
except:
pass
try:
import gen_figure_rst
except:
pass
try:
import gen_paper_rst
except:
pass
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['gen_rst', 'gen_figure_rst', 'gen_paper_rst',
'sphinx.ext.autodoc', 'sphinx.ext.doctest',
'sphinx.ext.imgmath', 'sphinx.ext.viewcode',
'sphinx.ext.autosummary', 'sphinx.ext.mathjax',
matplotlib.sphinxext.plot_directive.__name__]
import numpy_ext.numpydoc
extensions.append('numpy_ext.numpydoc')
autosummary_generate=True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# generate autosummary even if no references
autosummary_generate = True
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'astroML'
copyright = '2012-2020, Jake Vanderplas & AstroML Developers'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# Generate the plots for the gallery
plot_gallery = True
# Generate example gallery
figure_gallery = True
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build', 'includes', '_templates', '_static']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'astroML'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['themes']
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = 'logos/Logo.gif'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = 'logos/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'astroMLdoc'
# -- Options for LaTeX output --------------------------------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'astroML.tex', 'astroML Documentation',
'Jake Vanderplas & astroML Developers', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'astroML', 'astroML Documentation',
['Jake Vanderplas'], 1)
]
| bsd-2-clause |
damaggu/SAMRI | samri/plotting/aggregate.py | 1 | 4664 | import matplotlib.pyplot as plt
import pandas as pd
import seaborn.apionly as sns
from os import path
from matplotlib import rcParams
EXTRA_COLORSET = ["#797979","#000000","#505050","#FFFFFF","#B0B0B0",]
def registration_qc(df,
cmap="Set3",
extra=False,
extra_cmap=EXTRA_COLORSET,
group={"sub":"Subject"},
repeat={"ses":"Session"},
samri_style=True,
save_as=False,
show=True,
value={"similarity":"Similarity"},
values_rename={},
):
"""Aggregate plot of similarity metrics for registration quality control
Parameters
----------
df : pandas.DataFrame or str
Pandas Dataframe or CSV file containing similarity scores.
cmap : str or list, optional
If a string, the variable specifies the matplotlib colormap [2]_ (qualitative colormaps are recommended) to use for `repeat` highlighting. If a List, the variable should be a list of colors (e.g. `["#00FF00","#2222FF"]`).
extra_cmap : str or list, optional
If a string, the variable specifies the matplotlib colormap [2]_ (qualitative colormaps are recommended) to use for `extra` highlighting, which is applied as a contour to the `repeat`-colored pacthes. If a List, the variable should be a list of colors (e.g. `["#00FF00","#2222FF"]`).
group : str or dict, optional
Column of `df` to use as the group factor (values of this factor will represent the x-axis). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
samri_style : bool, optional
Whether to apply a generic SAMRI style to the plot.
save_as : str, optional
Path under which to save the generated plot (format is interpreted from provided extension).
show : bool, optional
Whether to show the plot in an interactive window.
repeat : str or dict, optional
Column of `df` to use as the repeat factor (values of this factor will be represent via different hues, according to `cmap`). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
value : str or dict, optional
Column of `df` to use as the value (this variable will be represented on the y-axis). If a dictionary is passed, the column named for the key of the dictionary is renamed to the value, and the value name is then used as the group factor. This is useful for the input of longer but clearer names for plotting.
values_rename : dict, optional
Dictionary used to rename values in `df`. This is useful for the input of longer but clearer names for plotting (this parameter will not rename column names, for renaming those, see parameters `extra`, `group`, `repeat`, and `value`).
	Returns
	-------
	None
		The plot is drawn (and optionally shown or saved); nothing is returned.

	References
	----------
.. [1] http://goanna.cs.rmit.edu.au/~fscholer/anova.php
.. [2] https://matplotlib.org/examples/color/colormaps_reference.html
.. [3] http://www.statsmodels.org/dev/example_formulas.html
"""
if samri_style:
this_path = path.dirname(path.realpath(__file__))
plt.style.use(path.join(this_path,"samri.conf"))
try:
if isinstance(df, basestring):
df = path.abspath(path.expanduser(df))
df = pd.read_csv(df)
except NameError:
if isinstance(df, str):
df = path.abspath(path.expanduser(df))
df = pd.read_csv(df)
for key in values_rename:
df.replace(to_replace=key, value=values_rename[key], inplace=True)
column_renames={}
if isinstance(value, dict):
column_renames.update(value)
value = list(value.values())[0]
if isinstance(group, dict):
column_renames.update(group)
group = list(group.values())[0]
if isinstance(repeat, dict):
column_renames.update(repeat)
repeat = list(repeat.values())[0]
if isinstance(extra, dict):
column_renames.update(extra)
extra = list(extra.values())[0]
df = df.rename(columns=column_renames)
if extra:
myplot = sns.swarmplot(x=group, y=value, hue=extra, data=df,
size=rcParams["lines.markersize"]*1.4,
palette=sns.color_palette(extra_cmap),
)
myplot = sns.swarmplot(x=group, y=value, hue=repeat, data=df,
edgecolor=(1, 1, 1, 0.0),
linewidth=rcParams["lines.markersize"]*.4,
palette=sns.color_palette(cmap),
)
else:
myplot = sns.swarmplot(x=group, y=value, hue=repeat, data=df,
palette=sns.color_palette(cmap),
size=rcParams["lines.markersize"]*2,
)
plt.legend(loc=rcParams["legend.loc"])
if show:
sns.plt.show()
if save_as:
plt.savefig(path.abspath(path.expanduser(save_as)), bbox_inches='tight')
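
# Minimal usage sketch (not part of the module); the CSV path and output file
# below are hypothetical and only illustrate the expected input format.
if __name__ == '__main__':
	registration_qc('qc_measurements.csv',
		value={'similarity':'Similarity'},
		group={'sub':'Subject'},
		repeat={'ses':'Session'},
		show=False,
		save_as='registration_qc.png',
		)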
| gpl-3.0 |
evgchz/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
'min_samples_split': 5}
plt.figure()
for label, color, setting in [('No shrinkage', 'orange',
{'learning_rate': 1.0, 'subsample': 1.0}),
('learning_rate=0.1', 'turquoise',
{'learning_rate': 0.1, 'subsample': 1.0}),
('subsample=0.5', 'blue',
{'learning_rate': 1.0, 'subsample': 0.5}),
('learning_rate=0.1, subsample=0.5', 'gray',
{'learning_rate': 0.1, 'subsample': 0.5}),
('learning_rate=0.1, max_features=2', 'magenta',
{'learning_rate': 0.1, 'max_features': 2})]:
params = dict(original_params)
params.update(setting)
clf = ensemble.GradientBoostingClassifier(**params)
clf.fit(X_train, y_train)
# compute test set deviance
test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
# clf.loss_ assumes that y_test[i] in {0, 1}
test_deviance[i] = clf.loss_(y_test, y_pred)
plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
'-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
nathania/data-science-from-scratch | code/gradient_descent.py | 53 | 5895 | from __future__ import division
from collections import Counter
from linear_algebra import distance, vector_subtract, scalar_multiply
import math, random
def sum_of_squares(v):
"""computes the sum of squared elements in v"""
return sum(v_i ** 2 for v_i in v)
def difference_quotient(f, x, h):
return (f(x + h) - f(x)) / h
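
# Example: difference_quotient(lambda x: x * x, 3, 0.0001) is approximately 6,
# the derivative of x**2 at x = 3.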
def plot_estimated_derivative():
def square(x):
return x * x
def derivative(x):
return 2 * x
derivative_estimate = lambda x: difference_quotient(square, x, h=0.00001)
# plot to show they're basically the same
import matplotlib.pyplot as plt
x = range(-10,10)
plt.plot(x, map(derivative, x), 'rx') # red x
plt.plot(x, map(derivative_estimate, x), 'b+') # blue +
plt.show() # purple *, hopefully
def partial_difference_quotient(f, v, i, h):
# add h to just the i-th element of v
w = [v_j + (h if j == i else 0)
for j, v_j in enumerate(v)]
return (f(w) - f(v)) / h
def estimate_gradient(f, v, h=0.00001):
return [partial_difference_quotient(f, v, i, h)
for i, _ in enumerate(v)]
def step(v, direction, step_size):
"""move step_size in the direction from v"""
return [v_i + step_size * direction_i
for v_i, direction_i in zip(v, direction)]
def sum_of_squares_gradient(v):
return [2 * v_i for v_i in v]
def safe(f):
"""define a new function that wraps f and return it"""
def safe_f(*args, **kwargs):
try:
return f(*args, **kwargs)
except:
return float('inf') # this means "infinity" in Python
return safe_f
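
# Example: safe(lambda x: 1 / x)(0) returns float('inf') instead of raising
# ZeroDivisionError.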
#
#
# minimize / maximize batch
#
#
def minimize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
"""use gradient descent to find theta that minimizes target function"""
step_sizes = [100, 10, 1, 0.1, 0.01, 0.001, 0.0001, 0.00001]
theta = theta_0 # set theta to initial value
target_fn = safe(target_fn) # safe version of target_fn
value = target_fn(theta) # value we're minimizing
while True:
gradient = gradient_fn(theta)
next_thetas = [step(theta, gradient, -step_size)
for step_size in step_sizes]
# choose the one that minimizes the error function
next_theta = min(next_thetas, key=target_fn)
next_value = target_fn(next_theta)
# stop if we're "converging"
if abs(value - next_value) < tolerance:
return theta
else:
theta, value = next_theta, next_value
def negate(f):
"""return a function that for any input x returns -f(x)"""
return lambda *args, **kwargs: -f(*args, **kwargs)
def negate_all(f):
"""the same when f returns a list of numbers"""
return lambda *args, **kwargs: [-y for y in f(*args, **kwargs)]
def maximize_batch(target_fn, gradient_fn, theta_0, tolerance=0.000001):
return minimize_batch(negate(target_fn),
negate_all(gradient_fn),
theta_0,
tolerance)
#
# minimize / maximize stochastic
#
def in_random_order(data):
"""generator that returns the elements of data in random order"""
indexes = [i for i, _ in enumerate(data)] # create a list of indexes
random.shuffle(indexes) # shuffle them
for i in indexes: # return the data in that order
yield data[i]
def minimize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
data = zip(x, y)
theta = theta_0 # initial guess
alpha = alpha_0 # initial step size
min_theta, min_value = None, float("inf") # the minimum so far
iterations_with_no_improvement = 0
# if we ever go 100 iterations with no improvement, stop
while iterations_with_no_improvement < 100:
value = sum( target_fn(x_i, y_i, theta) for x_i, y_i in data )
if value < min_value:
# if we've found a new minimum, remember it
# and go back to the original step size
min_theta, min_value = theta, value
iterations_with_no_improvement = 0
alpha = alpha_0
else:
# otherwise we're not improving, so try shrinking the step size
iterations_with_no_improvement += 1
alpha *= 0.9
# and take a gradient step for each of the data points
for x_i, y_i in in_random_order(data):
gradient_i = gradient_fn(x_i, y_i, theta)
theta = vector_subtract(theta, scalar_multiply(alpha, gradient_i))
return min_theta
def maximize_stochastic(target_fn, gradient_fn, x, y, theta_0, alpha_0=0.01):
return minimize_stochastic(negate(target_fn),
negate_all(gradient_fn),
x, y, theta_0, alpha_0)
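
# Illustrative sketch (not from the original book code): fit the slope of
# y = 3 * x with minimize_stochastic; the toy data and the squared-error
# target/gradient below are assumptions made only for this demonstration.
def demo_minimize_stochastic():
    x = range(10)
    y = [3 * x_i for x_i in x]

    def squared_error(x_i, y_i, theta):
        return (y_i - theta[0] * x_i) ** 2

    def squared_error_gradient(x_i, y_i, theta):
        # derivative of (y_i - theta * x_i)**2 with respect to theta
        return [-2 * x_i * (y_i - theta[0] * x_i)]

    theta_0 = [random.random()]
    return minimize_stochastic(squared_error, squared_error_gradient,
                               x, y, theta_0)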
if __name__ == "__main__":
print "using the gradient"
v = [random.randint(-10,10) for i in range(3)]
tolerance = 0.0000001
while True:
#print v, sum_of_squares(v)
gradient = sum_of_squares_gradient(v) # compute the gradient at v
next_v = step(v, gradient, -0.01) # take a negative gradient step
if distance(next_v, v) < tolerance: # stop if we're converging
break
v = next_v # continue if we're not
print "minimum v", v
print "minimum value", sum_of_squares(v)
print
print "using minimize_batch"
v = [random.randint(-10,10) for i in range(3)]
v = minimize_batch(sum_of_squares, sum_of_squares_gradient, v)
print "minimum v", v
print "minimum value", sum_of_squares(v)
| unlicense |
jeroendierckx/Camelot | camelot/core/utils.py | 1 | 9051 | # ============================================================================
#
# Copyright (C) 2007-2012 Conceptive Engineering bvba. All rights reserved.
# www.conceptive.be / project-camelot@conceptive.be
#
# This file is part of the Camelot Library.
#
# This file may be used under the terms of the GNU General Public
# License version 2.0 as published by the Free Software Foundation
# and appearing in the file license.txt included in the packaging of
# this file. Please review this information to ensure GNU
# General Public Licensing requirements will be met.
#
# If you are unsure which license is appropriate for your use, please
# visit www.python-camelot.com or contact project-camelot@conceptive.be
#
# This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
# WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# For use of this library in commercial applications, please contact
# project-camelot@conceptive.be
#
# ============================================================================
"""Utility functions"""
from PyQt4 import QtGui
from PyQt4 import QtCore
import logging
logger = logging.getLogger('camelot.core.utils')
def is_deleted_pyqt( qobj ):
"""
:param qobj: a :class:`QtCore.QObject`
:return: :keyword:`True` if the qobj was deleted, :keyword:`False`
otherwise
"""
import sip
return sip.isdeleted( qobj )
def is_deleted_pyside( qobj ):
"""
:param qobj: a :class:`QtCore.QObject`
:return: :keyword:`True` if the qobj was deleted, :keyword:`False`
otherwise
"""
return False
if hasattr(QtCore, 'PYQT_VERSION_STR'):
pyqt = True
is_deleted = is_deleted_pyqt
else:
pyqt = False
is_deleted = is_deleted_pyside
# try to activate the PySide backend of matplotlib
# http://www.scipy.org/Cookbook/Matplotlib/PySide
try:
import matplotlib
matplotlib.rcParams['backend.qt4'] = 'PySide'
except:
pass
def create_constant_function(constant):
return lambda:constant
class CollectionGetterFromObjectGetter(object):
"""Convert an object getter to a collection getter. The resulting
class is callable and will make sure object_getter is only called
once, even if collection getter is called multiple times.
"""
def __init__(self, object_getter):
""":param object_getter: a function that returns the object to
be put in the collection.
"""
self._object_getter = object_getter
self._collection = None
def __call__(self):
if not self._collection:
self._collection = [self._object_getter()]
return self._collection
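
# Usage sketch (illustrative, not from the original module): wrap a
# single-object getter so it can be passed where a collection getter is
# expected; the wrapped getter is evaluated only once.
#
#     collection_getter = CollectionGetterFromObjectGetter(lambda: some_object)
#     collection_getter()   # -> [some_object]
#     collection_getter()   # same list, object getter not called again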
"""
A Note on GUI Types
Because QVariant is part of the QtCore library, it cannot provide conversion
functions to data types defined in QtGui, such as QColor, QImage, and QPixmap.
In other words, there is no toColor() function. Instead, you can use the
QVariant.value() or the qVariantValue() template function. For example:
QVariant variant;
...
QColor color = variant.value<QColor>();
The inverse conversion (e.g., from QColor to QVariant) is automatic for all
data types supported by QVariant, including GUI-related types:
QColor color = palette().background().color();
QVariant variant = color;
"""
def variant_to_pyobject(qvariant=None):
"""Try to convert a QVariant to a python object as good as possible"""
if not pyqt:
return qvariant
import datetime
if not qvariant:
return None
if qvariant.isNull():
return None
type = qvariant.type()
if type == QtCore.QVariant.String:
value = unicode(qvariant.toString())
elif type == QtCore.QVariant.Date:
value = qvariant.toDate()
value = datetime.date(year=value.year(),
month=value.month(),
day=value.day())
elif type == QtCore.QVariant.Int:
value = int(qvariant.toInt()[0])
elif type == QtCore.QVariant.LongLong:
value = int(qvariant.toLongLong()[0])
elif type == QtCore.QVariant.Double:
value = float(qvariant.toDouble()[0])
elif type == QtCore.QVariant.Bool:
value = bool(qvariant.toBool())
elif type == QtCore.QVariant.Time:
value = qvariant.toTime()
value = datetime.time(hour = value.hour(),
minute = value.minute(),
second = value.second())
elif type == QtCore.QVariant.DateTime:
value = qvariant.toDateTime()
value = value.toPyDateTime ()
elif type == QtCore.QVariant.Color:
value = QtGui.QColor(qvariant)
else:
value = qvariant.toPyObject()
return value
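
# Minimal usage sketch (not part of the original module), assuming a PyQt4
# environment where QtCore.QVariant is available:
def _variant_to_pyobject_example():
    """Round-trip an int through QVariant; returns the python int 5."""
    variant = QtCore.QVariant(5)
    return variant_to_pyobject(variant)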
#
# Global dictionary containing all user defined translations in the
# current locale
#
_translations_ = {}
#
# Encoding used when transferring translation strings from
# python to qt
#
_encoding=QtCore.QCoreApplication.UnicodeUTF8
def set_translation(source, value):
"""Store a tranlation in the global translation dictionary"""
_translations_[source] = value
def load_translations():
"""Fill the global dictionary of translations with all data from the
database, to be able to do fast gui thread lookups of translations"""
language = unicode(QtCore.QLocale().name())
from sqlalchemy import sql
from camelot.model.i18n import Translation
# only load translations when the camelot model is active
if not hasattr(Translation, 'query'):
return
query = sql.select( [Translation.source, Translation.value],
whereclause = sql.and_(Translation.language==language,
Translation.value!=None,
Translation.value!=u'') )
for source, value in Translation.query.session.execute(query):
_translations_[source] = value
def _qtranslate(string_to_translate):
"""Translate a string using the QCoreApplication translation framework
:param string_to_translate: a unicode string
:return: the translated unicode string if it was possible to translate
"""
return unicode(QtCore.QCoreApplication.translate('',
string_to_translate.encode('utf-8'),
encoding=_encoding))
def ugettext(string_to_translate):
"""Translate the string_to_translate to the language of the current locale.
This is a two step process. First the function will try to get the
translation out of the Translation entity, if this is not successfull, the
function will ask QCoreApplication to translate string_to_translate (which
tries to get the translation from the .qm files)"""
assert isinstance(string_to_translate, basestring)
result = _translations_.get(string_to_translate, None)
if not result:
result = _qtranslate( string_to_translate )
#print string_to_translate, result
# try one more time with string_to_translate capitalized
        if result == string_to_translate:
            result2 = _qtranslate( string_to_translate.capitalize() )
            if result2 != string_to_translate.capitalize():
result = result2
return result
def dgettext(domain, message):
"""Like ugettext but look the message up in the specified domain.
This uses the Translation table.
"""
assert isinstance(message, basestring)
from camelot.model.i18n import Translation
from sqlalchemy import sql
query = sql.select( [Translation.value],
whereclause = sql.and_(Translation.language.like('%s%%'%domain),
Translation.source==message) ).limit(1)
for translation in Translation.query.session.execute(query):
return translation[0]
return message
class ugettext_lazy(object):
"""Like :function:`ugettext`, but delays the translation until the string
is shown to the user. This makes it possible for the user to translate
the string.
"""
def __init__(self, string_to_translate):
assert isinstance(string_to_translate, basestring)
self._string_to_translate = string_to_translate
def __str__(self):
return ugettext(self._string_to_translate)
def __unicode__(self):
return ugettext(self._string_to_translate)
def __eq__(self, other_string):
if isinstance(other_string, basestring):
return other_string == self._string_to_translate
if isinstance(other_string, ugettext_lazy):
return other_string._string_to_translate == self._string_to_translate
return False
def __ne__(self, other_string):
return not self.__eq__( other_string )
def __repr__(self):
return u"_('%s')"%self._string_to_translate
def format_float(value, precision=3):
return QtCore.QString("%L1").arg(float(value), 0, 'f', precision)
| gpl-2.0 |
WojciechMigda/TCO-PCFStupskiPrize1 | src/cell_patches_dbscan.py | 1 | 8930 | #!/opt/anaconda2/bin/python
# -*- coding: utf-8 -*-
"""
################################################################################
#
# Copyright (c) 2015 Wojciech Migda
# All rights reserved
# Distributed under the terms of the MIT license
#
################################################################################
#
#  Filename: cell_patches_dbscan.py
#
#  Description:
#      Cell patches from images (with DBSCAN)
#
# Authors:
# Wojciech Migda
#
################################################################################
#
# History:
# --------
# Date Who Ticket Description
# ---------- --- --------- ------------------------------------------------
# 2015-12-20 wm Initial version
#
################################################################################
"""
from __future__ import print_function
DEBUG = False
__all__ = []
__version__ = 0.1
__date__ = '2015-12-20'
__updated__ = '2015-12-20'
from sys import path as sys_path
sys_path.insert(0, './Pipe')
import pipe as P
def pois(im, num_peaks, footprint_radius=2.5, min_dist=8, thr_abs=0.7):
    """Return, for each layer of `im`, the coordinates of up to `num_peaks`
    local intensity maxima (points of interest), detected with a circular
    footprint of radius `footprint_radius`."""
    from skimage.draw import circle
FOOTPRINT_RADIUS = footprint_radius
cxy = circle(4, 4, FOOTPRINT_RADIUS)
from numpy import zeros
cc = zeros((9, 9), dtype=int)
cc[cxy] = 1
from skimage.feature import peak_local_max
MIN_DIST = min_dist
THR_ABS = thr_abs
coordinates = [
peak_local_max(
im[:, :, layer],
min_distance=MIN_DIST,
footprint=cc,
threshold_abs=THR_ABS,
num_peaks=num_peaks) for layer in range(im.shape[2])]
return coordinates
@P.Pipe
def cluster(seq, window, epsilon, with_polar):
from numpy import where,array
from skimagepipes import cart2polar_
w2 = window / 2
for im, pois in seq:
for layer in range(im.shape[2]):
p = pois[layer]
p = p[where(
(p[:, 0] >= w2) &
(p[:, 0] < (im.shape[0] - w2)) &
(p[:, 1] >= w2) &
(p[:, 1] < (im.shape[1] - w2))
)
]
print(str(p.shape[0]) + " pois")
patches = array([im[cx - w2:cx + w2, cy - w2:cy + w2, layer].ravel() for cx, cy in p])
if with_polar:
patches = array([cart2polar_(im[cx - w2:cx + w2, cy - w2:cy + w2, layer]).ravel() for cx, cy in p])
pass
from sklearn.cluster import DBSCAN
#clf = DBSCAN(min_samples=5, eps=3.6)
#clf = DBSCAN(min_samples=5, eps=3.3) # 16x16 [51,148,105]
#clf = DBSCAN(min_samples=5, eps=3.2) # 16x16 [42,105,66]
#clf = DBSCAN(min_samples=5, eps=3.1) # 16x16 [36,57,33]
#clf = DBSCAN(min_samples=5, eps=2.8) # 14x14 [70,259,128]
#clf = DBSCAN(min_samples=5, eps=2.6) # 14x14 [50,104,42*]
#clf = DBSCAN(min_samples=5, eps=2.4) # 14x14 [34,34,11]
#clf = DBSCAN(min_samples=5, eps=2.2) # 12x12 [84*,248,84]
#clf = DBSCAN(min_samples=5, eps=2.1) # 12x12 [69*,155,48]
clf = DBSCAN(eps=epsilon, leaf_size=1000)
clf.fit(patches)
print(clf.components_.shape)
nclust = clf.components_.shape[0]
VISUALIZE = True
VISUALIZE = False
if VISUALIZE:
from skimage.exposure import rescale_intensity
from matplotlib import pyplot as plt
fig, ax = plt.subplots(1, nclust, figsize=(8, 3), sharex=True, sharey=True, subplot_kw={'adjustable':'box-forced'})
for i in range(nclust):
ax[i].imshow(
rescale_intensity(
clf.components_[i].reshape((window, window))
)
,interpolation='nearest'
#,cmap=plt.cm.gray
)
ax[i].axis('off')
pass
fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9,
bottom=0.02, left=0.02, right=0.98)
plt.show()
pass
yield clf.components_
pass
return
def work(in_csv_file, out_csv_file, max_n_pois, patch_size, epsilon, with_polar):
from pypipes import as_csv_rows,iformat,loopcount,itime,iattach
from nppipes import itake,iexpand_dims
from skimagepipes import as_image,as_float,equalize_hist,imshow,trim,rgb_as_hed
from tcopipes import clean
features = (
in_csv_file
| as_csv_rows
#| P.skip(1)
#| P.take(3)
| itake(0)
| P.tee
| iformat('../../data/DX/{}-DX.png')
| as_image
| itime
| loopcount
| trim(0.2)
| as_float
| clean
| rgb_as_hed
| itake(0, axis=2)
| iexpand_dims(axis=2)
| equalize_hist
| imshow("H layer", cmap='gray')
| iattach(pois, max_n_pois)
| cluster(patch_size, epsilon, with_polar)
| P.as_list
)
#print(type(next(features, None)))
#print(next(features, None).shape)
from numpy import vstack
from numpy import savetxt
#print(vstack(features).shape)
savetxt(out_csv_file, vstack(features), delimiter=',', fmt='%f')
pass
def main(argv=None): # IGNORE:C0111
'''Command line options.'''
from sys import argv as Argv
if argv is None:
argv = Argv
pass
else:
Argv.extend(argv)
pass
from os.path import basename
program_name = basename(Argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version, program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by Wojciech Migda on %s.
Copyright 2015 Wojciech Migda. All rights reserved.
Licensed under the MIT License
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
from argparse import ArgumentParser
from argparse import RawDescriptionHelpFormatter
from argparse import FileType
from sys import stdout,stdin
# Setup argument parser
parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter)
#parser.add_argument("-D", "--data-dir",
# type=str, action='store', dest="data_dir", required=True,
# help="directory with input CSV files, BMP 'train' and 'test' subfolders, and where H5 will be stored")
parser.add_argument("-i", "--in-csv",
action='store', dest="in_csv_file", default=stdin,
type=FileType('r'),
help="input CSV file name")
parser.add_argument("-o", "--out-csv",
action='store', dest="out_csv_file", default=stdout,
type=FileType('w'),
help="output CSV file name")
parser.add_argument("-p", "--patch-size",
type=int, default=12, action='store', dest="patch_size",
help="size of square patch to build the codebook upon, in pixels")
parser.add_argument("-N", "--max-pois",
type=int, default=5000, action='store', dest="max_n_pois",
help="max number of PoIs to collect (num_peaks of peak_local_max)")
parser.add_argument("-e", "--epsilon",
type=float, default=2.1, action='store', dest="epsilon",
help="epsilon for DBSCAN")
parser.add_argument("-P", "--with-polar",
default=False, action='store_true', dest="with_polar",
help="convert patches to polar coordinates")
# Process arguments
args = parser.parse_args()
for k, v in args.__dict__.items():
print(str(k) + ' => ' + str(v))
pass
work(args.in_csv_file,
args.out_csv_file,
args.max_n_pois,
args.patch_size,
args.epsilon,
args.with_polar)
return 0
except KeyboardInterrupt:
### handle keyboard interrupt ###
return 0
except Exception as e:
if DEBUG:
raise(e)
pass
indent = len(program_name) * " "
from sys import stderr
stderr.write(program_name + ": " + repr(e) + "\n")
stderr.write(indent + " for help use --help")
return 2
pass
if __name__ == "__main__":
if DEBUG:
from sys import argv
argv.append("--in-csv=../../data/training.csv")
argv.append("--max-pois=5000")
pass
from sys import exit as Exit
Exit(main())
pass
| mit |