repo | file | code | file_length | avg_line_length | max_line_length | extension_type
---|---|---|---|---|---|---|
MosfireDRP | MosfireDRP-master/MOSFIRE/Options.py | '''
===================
MOSFIRE Options
===================
'''
import getpass
import os
__version__ = '2014.06.10'
npix = 2048
path_bpm = os.path.join(os.path.dirname(__file__), "data", "badpix_10sep2012.fits")
flat = {
"version": 1,
"edge-order": 4, # Polynomial order for edge of slit
"edge-fit-width": 20,
"flat-field-order": 7 # Order of polynomial for fitting the flat field profile
}
wavelength = {
"datadir" : os.path.join(os.path.dirname(__file__), "data"),
"version": 2,
"fractional-wavelength-search": 0.99935, # used in determining oned wavelength solutions
"chebyshev-degree": 5, # polynomial order for fitting wavelengths
}
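# Usage note: downstream code treats these dictionaries as plain option mappings.
# For example, Wavelength.find_known_lines() reads
# options["fractional-wavelength-search"] to set the search window around each line,
# and Wavelength.fit_chebyshev_to_lines() reads options["chebyshev-degree"] for the
# polynomial degree, e.g.:
#   from MOSFIRE import Options
#   degree = Options.wavelength["chebyshev-degree"]   # -> 5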
| 710 | 23.517241 | 96 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Wavelength.py | """
==================================
MOSFIRE Wavelength Calibrations
==================================
This module is responsible for determining wavelength solutions.
The wavelength is represented as a physical model of MOSFIRE using the
grating equation, the pixel size (18 micron), the camera focal length
(250 mm), the groove density of the grating (110.5 lines per mm), and the
grating order (Y: 6, J: 5, H: 4, K: 3).
Formally, a fifth-order Chebyshev polynomial is fit as the wavelength
solution, though the degree is specified through Options.py.
scale is (pixel size) / (camera focal length)
-- Helper functions also exist for determining the on-order region of
a spectrum --
Control flow:
1. Interactively fit the spectrum extracted at a spatially central row of
each slit. The goal is to achieve an RMS of 0.1 Angstrom or better. The
entry point is the InteractiveSolution class, called from the wrapper
fit_lambda_interactively. InteractiveSolution is a visual wrapper around
the functions find_known_lines & fit_chebyshev_to_lines. Currently an old
optical model is used to estimate the initial wavelength solution, but
this could be updated in the future.
-> Produces lambda_center_coeffs_maskname.npy
2. Based on the interactive fits, perform automated fits "outwards" from
the central row of each slit. These fits use the final line list from the
interactive fit. The entry point is the function fit_lambda, which
iteratively calls fit_outwards_refit.
-> Produces lambda_2d_coeffs_maskname.npy
3. Apply these lambda fits to produce a full wavelength solution.
INPUT:
OUTPUT:
npk Apr/May 2012 - Significant enhancements w/ first light data
npk April 26 2011
npk May 4 2011
"""
import logging as log
from multiprocessing import Pool
import os
import itertools
import time
import numpy as np
try:
from astropy.io import fits as pf
except ImportError:
import pyfits as pf
from matplotlib import pyplot as pl
from scipy.interpolate import interp1d
from scipy import signal
from scipy import optimize
from matplotlib.widgets import Button
from numpy.polynomial import chebyshev as CV
from MOSFIRE import CSU, Fit, IO, Options, Filters, Detector
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
import pdb
__version__ = "1May2012"
MADLIMIT = 0.1
try:
__IPYTHON__
reload(Options)
reload(CSU)
reload(IO)
reload(Fit)
reload(Filters)
except:
pass
#
# Glue code
#
def filelist_to_wavename(files, band, maskname, options):
start = files[0].split('/')[-1].rstrip(".fits")
end = files[-1].split('/')[-1].rstrip(".fits").split("_")[1]
name = "wave_stack_{0}_{1}-{2}.fits".format(band, start, end)
return name
def grating_results(band):
'''returns the dlambda/dpixel in angstrom for a band'''
orders = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = orders[band]
d = 1e3/110.5 # Groove spacing in micron
pixelsize, focal_length = 18.0, 250e3 # micron
scale = pixelsize/focal_length
dlambda = scale * d / order * 10000
return dlambda
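# Example: the dispersion returned here follows from scale = pixelsize/focal_length
# and the grating parameters quoted in the module docstring.
#   grating_results("K")   # ~2.17 Angstrom/pixel (K is 3rd order)
#   grating_results("Y")   # ~1.09 Angstrom/pixel (Y is 6th order)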
def filelist_to_path(files, band, maskname, options):
outf = filelist_to_wavename(files, band, maskname, options)
return outf
def imcombine(files, maskname, bandname, options, extension=None):
''' This version of imcombine is used to create the wave_stack file
which is used only by the wavelength fitting routine. This imcombine does
not produce science results.
Args:
files: list of strings with file names
maskname: string with name of the mask
bandname: string with name of band
options: passed across from Options file
extension: path to file that contains a well-formatted FITS header
this should be used only when the detector server fails
to write the full FITS header
Results:
writes a median combined image in electrons. It is called
wave_stack_[bandname]_[filename range].fits'''
pixelflat_file = "pixelflat_2d_{0}.fits".format(bandname)
flat = IO.readfits(pixelflat_file, use_bpm=True)[1]
flat = flat.filled(1.0)
files = IO.list_file_to_strings(files)
info("combining Wavelength files")
for file in files:
debug(str(file))
debug("list complete")
ADUs = np.zeros((len(files), 2048, 2048))
prevssl = None
prevmn = None
patternid = None
header = None
for i in range(len(files)):
fname = files[i]
thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
info("Checking maskname and filter for {} {}/{}".format(fname, maskname, thishdr['filter']))
ADUs[i,:,:] = data.filled(0)
if thishdr["aborted"]:
raise Exception("Img '%s' was aborted and should not be used" %
fname)
if prevssl is not None:
if len(prevssl) != len(bs.ssl):
# todo Improve these checks
info( "Reading file "+str(fname))
error("This file contains "+str(len(bs.ssl))+" slits instead of "+str(len(prevssl)))
raise Exception("The stack of input files seems to be of "
"different masks")
prevssl = bs.ssl
if maskname is not None:
if maskname != thishdr["maskname"]:
warning("Maskname specified ({0}) does not match header maskname "
" ({1}).".format(maskname, thishdr["maskname"]))
if thishdr["BUNIT"] != "ADU per coadd":
error("The units of '%s' are not in ADU per coadd and "
"this violates an assumption of the DRP. Some new code "
"is needed in the DRP to handle the new units of "
"'%s'." % (fname, thishdr["BUNIT"]))
raise Exception("The units of '%s' are not in ADU per coadd and "
"this violates an assumption of the DRP. Some new code "
"is needed in the DRP to handle the new units of "
"'%s'." % (fname, thishdr["BUNIT"]))
''' Construct Header'''
if header is None:
header = thishdr
header.set("imfno%2.2i" % (i), fname)
for key in list(header.keys()):
try: val = header[key]
except KeyError:
warning("Header should have key '%s' but does not" % key)
print("Header should have key '%s' but does not" % key)
if key in thishdr:
if val != thishdr[key]:
newkey = "hierarch " + key + ("_img%2.2i" % i)
try: header.set(newkey.rstrip(), thishdr[key])
except ValueError: pass
''' Now handle error checking'''
if maskname is not None:
if thishdr["maskname"] != maskname:
warning("File %s uses mask '%s' but the stack is of '%s'" %
(fname, thishdr["maskname"], maskname))
info('Done.')
wavename = filelist_to_wavename(files, bandname, maskname, options)
info('Combining images to make {}'.format(wavename))
header.set("frameid", "median")
electrons = np.median(np.array(ADUs) * Detector.gain, axis=0)
IO.writefits(electrons, maskname, wavename, options, overwrite=True,
header=header)
info("Done")
def fit_lambda(maskname,
bandname,
wavenames,
guessnames,
options,
longslit=None,
neon=None,
extension=None,
wavenames2=None):
"""Fit the two-dimensional wavelength solution to each science slit
Inputs:
bandname: The mask name as a string
wavenames: List of wavelength files
options: Dictionary of wavelength options
longlist: True if a longslit
neon: path to neon image [2k x 2k frame]
extension: path to file that contains a well formated fits header
this should be used only when the detector server fails
to write the full FITS header
Prints:
This step prints out lines like
resid ang S01 @ p####: 0.25 rms 0.15 mad [shift10]
which means that the residual error in angstrom units for slit #1,
at pixel location #### is 0.25 angstrom RMS and 0.15 median absolute
deviation. The shift refers to the average amount of pixel shift from
the solution determined during the interactive fitting step.
Results:
lambda_coeffs_...npy: Coefficients file containing an array of
dictionaries:
{"slitno": The 0-indexed slit number into the barset
"lines": The list of fitted emission lines
"center_sol": The Chebyshev coefficients in the center of the
slit
"2d": A dictionary containing:
{'positions': The rows that comprise the slit
'delts': The mean standard deviation of features.
This list is the length of 'lines'
'lambdaRMS': RMS of delts
'lambdaMAD': MAD of delts
'coeffs': The Chebyshev coefficients for each position}
}
lambda_solution....fits: A fits file with wavelength [Ang] for each
pixel in the image
sigs_solution...fits: A fits file with the standard deviation per row
of the wavelength solution
rectified_wave_stack: The tilted spectra are interpolated onto a common
wavelength grid.
"""
global bs, data, lamout, center_solutions, edgedata, data2, center_solutions2
np.seterr(all="ignore")
""" Set defaults for second set of lines to None """
data2=None
center_solutions=None
wavenames = IO.list_file_to_strings(wavenames)
debug("WAVENAMES"+ str(wavenames))
wavename = filelist_to_wavename(wavenames, bandname, maskname,
options).rstrip(".fits")
guessnames = IO.list_file_to_strings(guessnames)
debug("GUESSNAMES"+str(guessnames))
guessname = filelist_to_wavename(guessnames, bandname, maskname,
options).rstrip(".fits")
fn = "lambda_coeffs_{0}.npy".format(wavename)
info("%s] Writing to: %s" % (maskname, fn))
wavepath = filelist_to_path(wavenames, bandname, maskname,
options)
drop, data = IO.readfits(wavepath, use_bpm=True)
header, drop,bs = IO.readmosfits(wavenames[0], options, extension=extension)
fnum = guessname
center_solutions = IO.load_lambdacenter(fnum, maskname, options)
edgedata, metadata = IO.load_edges(maskname, bandname, options)
""" This neon flag looks like it may be removed -MK 2014 June 10 """
if neon is not None:
drop, Neon = IO.readfits(neon, use_bpm=True)
data += Neon
if wavenames2 is not None:
wavenames2 = IO.list_file_to_strings(wavenames2)
debug("WAVENAMES Second Set:"+str(wavenames2))
wavename2 = filelist_to_wavename(wavenames2, bandname, maskname,
options).rstrip(".fits")
guessnames2 = IO.list_file_to_strings(wavenames2)
debug("GUESSNAMES Second Set:"+str(guessnames2))
guessname2 = filelist_to_wavename(guessnames2, bandname, maskname,
options).rstrip(".fits")
wavepath2 = filelist_to_path(wavenames2, bandname, maskname,
options)
drop, data2 = IO.readfits(wavepath2, use_bpm=True)
header2, drop,bs2 = IO.readmosfits(wavenames2[0], options, extension=extension)
fnum2 = guessname2
center_solutions2 = IO.load_lambdacenter(fnum2, maskname, options)
if longslit is not None and longslit['mode'] == "longslit":
info("*** Longslit mode *** Slitedges set to:")
info("Bottom: "+str(edgedata[0]["yposs_bot"][0]))
info("Top: "+str(edgedata[0]["yposs_top"][0]))
solutions = []
lamout = np.zeros(shape=(2048, 2048), dtype=np.float32)
tock = time.time()
multicore = True
if multicore:
p = Pool()
solutions = p.map(fit_lambda_helper, list(range(len(bs.ssl))))
p.close()
else:
solutions = map(fit_lambda_helper, list(range(len(bs.ssl))))
tick = time.time()
info("-----> Mask took %i" % (tick-tock))
try: os.remove(fn)
except: pass
np.save(fn, solutions)
return solutions
def fit_lambda_helper(slitno):
"""This helper function exists for multiprocessing suport"""
global bs, data, lamout, center_solutions, edgedata, data2, center_solutions2
slitidx = slitno-1
tick = time.time()
slitedges = edgedata
sol_1d = center_solutions[slitidx]["sol_1d"]
edge = slitedges[slitidx]
linelist = center_solutions[slitidx]["linelist"]
start = center_solutions[slitidx]["extract_pos"]
bottom = np.ceil(edge["bottom"](1024))+2
top = np.ceil(edge["top"](1024))-2
info(("* Fitting Slit %s from %i to %i" % (bs.ssl[slitno]["Target_Name"],
bottom, top)))
if data2 is not None:
sol_1d2 = center_solutions2[slitidx]["sol_1d"]
linelist2 = center_solutions2[slitidx]["linelist"]
sol_2d = fit_outwards_refit(data, bs, sol_1d, linelist, Options.wavelength,
start, bottom, top, slitno, data2=data2, linelist2=linelist2, sol_1d2=sol_1d2)
else:
sol_2d = fit_outwards_refit(data, bs, sol_1d, linelist, Options.wavelength,
start, bottom, top, slitno)
sol = {"slitno": slitno, "center_sol": np.array(sol_1d[1]), "2d":
sol_2d, "lines": np.array(linelist)}
info("S%2.2i] TOOK: %i s" % (slitno, time.time()-tick))
return sol
def apply_interactive(maskname, band, options, apply=None, to=None, neon=False,
argon=False, extension=None,short_exp = False):
"""Fit the one-dimensional wavelength solution to each science slit"""
np.seterr(all="ignore")
# Load the guess wavelength solution data
wavenames = IO.list_file_to_strings(apply)
wavename = filelist_to_path(wavenames, band, maskname, options)
fn = "lambda_center_coeffs_{0}.npy".format(wavename.rstrip(".fits"))
waves = np.load(fn)
# Load the arc lamp
to_files = IO.list_file_to_strings(to)
to_filename = filelist_to_path(to_files, band, maskname, options)
mfits = IO.readfits(to_filename, use_bpm=True)
(drop, data) = mfits
(header, drop, bs) = IO.readmosfits(wavenames[0], options, extension=extension)
mfits = header, data, bs
linelist = pick_linelist(header, neon=neon, argon=argon, short_exp = short_exp)
solutions = []
pix = np.arange(2048)
for slitno in range(len(waves)):
info("Slit number = "+str(slitno+1))
csuslits = bs.scislit_to_csuslit(slitno+1)
try:
l = len(csuslits)
if l > 1:
csuslit = csuslits[l//2]
else:
csuslit = csuslits[0]
except:
csuslit = csuslits
extract_pos = bs.science_slit_to_pixel(slitno+1)
cfit = waves[slitno]['sol_1d'][1]
spec = \
np.ma.mean(data[extract_pos-1:extract_pos+1, :],
axis=0) # axis = 0 is spatial direction
STD = np.inf
n_attempts = 5
while (n_attempts > 0) and (STD > .3) :
ll = CV.chebval(pix, cfit)
[xs, sxs, sigmas] = find_known_lines(linelist, ll, spec, options)
[deltas, cfit, perror] = fit_chebyshev_to_lines(xs, sxs, linelist, options)
ok = np.isfinite(deltas)
STD = np.std(deltas[ok])
MAD = np.median(np.abs(deltas[ok]))
n_attempts -= 1
solutions.append({"linelist": linelist, "MAD": MAD,
"foundlines": xs, "foundlinesig": sxs,
"sol_1d": [deltas, cfit, sigmas], "STD":
STD, "slitno": slitno, "extract_pos":
extract_pos})
info("slitno %2.0i STD: %1.2f MAD: %1.2f" % (slitno+1, STD, MAD))
# Output filename
info(str(to_filename))
outfn = "lambda_center_coeffs_{0}.npy".format(to_filename.rstrip(".fits"))
np.save(outfn, solutions)
def check_wavelength_roi(maskname, band, skyfiles, arcfiles, LROI, options, no_check=False):
'''The purpose of this function is to help the user select a wavelength
range of interest over which to normalize the arcs versus sky solutions.
'''
skyfiles = IO.list_file_to_strings(skyfiles)
skyfilename = filelist_to_path(skyfiles, band, maskname, options)
fn = "lambda_center_coeffs_{0}.npy".format(skyfilename.rstrip(".fits"))
skysols = np.load(fn)
# Load the arc wavelength solution data
arcfiles = IO.list_file_to_strings(arcfiles)
arcfilename = filelist_to_path(arcfiles, band, maskname, options)
fn = "lambda_center_coeffs_{0}.npy".format(arcfilename.rstrip(".fits"))
arcsols = np.load(fn)
if len(skysols) != len(arcsols):
error("Number of slits in sky (%i) and arcs (%i) is different" % ( len(skysols) , len(arcsols)))
raise Exception("Number of slits in sky (%i) and arcs (%i) is different" % ( len(skysols) , len(arcsols)))
pix = np.arange(2048)
pl.figure(1)
if len(LROI) == 1: LROI *= len(skysols)
if len(LROI) != len(skysols):
error("Number of solutions is not equal to the LROI vector (%i!=%i)" % ( len(LROI), len(skysols)))
raise Exception("Number of solutions is not equal to the LROI vector (%i!=%i)" % ( len(LROI), len(skysols)))
MeanDiffs = []
for i in range(len(skysols)):
s = skysols[i]
a = arcsols[i]
ls = CV.chebval(pix, s['sol_1d'][1])
la = CV.chebval(pix, a['sol_1d'][1])
roi = (ls > LROI[i][0]) & (ls < LROI[i][1])
la -= np.mean( (la-ls)[roi] )
diff = ls - la
pl.plot(ls, diff)
pl.axvline(LROI[i][0], color='blue')
pl.axvline(LROI[i][1], color='red')
roi = (ls > 21500) & (ls < 22000)
MeanDiffs.append(np.mean(ls[roi] - la[roi]))
if not no_check:
pl.xlim(19000, 25000)
pl.ylim(-7,7)
pl.grid(True)
pl.title("Close this window to continue")
pl.xlabel("Sky Wavelength [Angstrom]")
pl.ylabel("Sky - Arc Wavelength [Angstrom]")
MeanDiffs =np.array(MeanDiffs)
info("RMS in 21500 < lambda < 22000 is %2.2f Ang" % (
np.sqrt(np.mean(MeanDiffs**2))))
pl.show()
return LROI
def fit_lambda_interactively(maskname, band, wavenames, options, neon=None,
longslit=None,argon=None, extension=None,
bypass=False, noninteractive=False, short_exp=False):
"""Fit the one-dimensional wavelength solution to each science slit
Args:
maskname: The maskname
band: The spectral band [Y, J, H, K]
wavenames: List of wavelength standard files
options: Options dictionary
neon: Using neon emission lines
argon: Using argon emission lines
extension: path to file that contains a well-formatted FITS header
this should be used only when the detector server fails
to write the full FITS header
longslit: Longslit dictionary containing {"yrange": [a,b], "row_position": YY}
Note that [a,b] is the range over which to extract the longslit spectrum;
row_position is the row at which to perform the interactive solution. This
row should be clean of any contaminating light.
noninteractive: Bypass the manual fitting and run an autofit routine.
bypass: Bypass the manual fitting and run an autofit routine. (This is
a duplicate of noninteractive above for backward compatibility).
"""
## Set noninteractive mode if either noninteractive or bypass is set
noninteractive = noninteractive or bypass
np.seterr(all="ignore")
wavenames = IO.list_file_to_strings(wavenames)
input_f = filelist_to_path(wavenames, band, maskname, options)
debug("{0} resolves to input files: {1}".format(str(wavenames), str(input_f)))
info("The wavelength files resolve to input file {0}".format(str(input_f)))
mfits = IO.readfits(input_f, use_bpm=True)
(drop, data) = mfits
(header, drop, bs) = IO.readmosfits(wavenames[0], options, extension=extension)
mfits = header, data, bs
name = filelist_to_wavename(wavenames, band, maskname, options)
fn = "lambda_center_coeffs_{0}.npy".format(name.rstrip(".fits"))
linelist = pick_linelist(header, neon=neon, argon=argon, short_exp = short_exp)
try:
solutions = np.load(fn)
info( "Solutions loaded from: "+str(fn))
except IOError: solutions = None
lamout = np.zeros(shape=(2048, 2048), dtype=np.float32)
tock = time.time()
outfilename = fn
if noninteractive is False:
fig = pl.figure(1,figsize=(16,8))
else:
fig = None
info("Started interactive solution")
if longslit is not None and longslit['mode'] == "longslit":
starting_pos = longslit["row_position"]
info("*** LONGSLIT MODE *** Extract position set to %i" % starting_pos)
else:
starting_pos = None
debug("using line list")
debug(linelist)
II = InteractiveSolution(fig, mfits, linelist, options, 1,
outfilename, solutions=solutions, noninteractive=noninteractive, starting_pos=starting_pos)
info( "Waiting")
if noninteractive is False:
pl.ioff()
pl.show()
#pl.draw()
#pl.show(block=True)
info("save to: "+str(fn))
np.save(outfilename, np.array(II.solutions))
np.save('barset.npy', [II.bs])
def polyfit2d(f, x, y, unc=1.0, orderx=1,ordery=1):
"""Fit a polynomial surface to 2D data, assuming the axes are
independent of each other.
Evaluate the fit via:
f = np.polyval(polyy, y) + np.polyval(polyx, x)
Usage:
polyx, polyy, cov = polyfit2d(f, x, y, unc=1.0, orderx=1, ordery=1)
Input:
f = an array of values to fit
x, y = the coordinates for each value of f
Optional inputs:
unc = the uncertainties, either one for each value of f or a
single value for all values (defaults to 1.0)
orderx = the polynomial order (for one dimension) of the fitting
function
ordery = same as above
Return value:
polyx, polyy = the polynomial coefficients (same format as
polyfit)
cov = the covariance matrix
v1.0.0 Written by Michael S. Kelley, UMD, Mar 2009
modified 13 aug 2012 by NPK, Caltech
"""
# the fitting function
def chi(p, y, x, f, unc, orderx, ordery):
cy = p[:1+ordery]
cx = p[1+orderx:]
model = np.zeros(f.shape) + np.polyval(cy, y) + np.polyval(cx, x)
chi = (f - model) / unc
return chi
# run the fit
lsq = optimize.leastsq
guess = np.zeros((orderx + 1)*(ordery+1))
result = lsq(chi, guess, args=(y, x, f, unc, orderx, ordery),
full_output=True)
fit = result[0]
cov = result[1]
cy = fit[:1+ordery]
cx = fit[1+orderx:]
return (cx, cy, cov)
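# Sketch of polyfit2d on a synthetic separable surface (the numbers are illustrative
# only):
#   yy, xx = np.mgrid[0:50, 0:50]
#   z = 2.0 * xx + 3.0 * yy + 5.0
#   cx, cy, cov = polyfit2d(z.ravel(), xx.ravel(), yy.ravel(), orderx=1, ordery=1)
#   model = np.polyval(cy, yy) + np.polyval(cx, xx)   # recovers z (constant term is
#                                                     # split between cx and cy)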
def helper_apply_sky_and_arc(positions, coeffs, lambdaMAD):
global lams, sigs
def find_pixel_offset(lam_sky, coeff_arc, LROI):
'''Find the best pixel offset between the sky wavelength solution and the
arc Chebyshev solution by looping over a range of pixel shifts.'''
roi = (lam_sky > LROI[0]) & (lam_sky < LROI[1])
dpixs = np.arange(-1, 1, .01)
RMSs = np.zeros(len(dpixs))
xx = np.arange(2048)
for rms_cnt in range(len(dpixs)):
dpix = dpixs [rms_cnt]
dl = lam_sky - CV.chebval(xx - dpix, coeff_arc)
RMSs[rms_cnt] = np.sqrt(np.mean(dl[roi]**2))
minix = np.argmin(RMSs)
best_dpix = dpixs[minix]
return best_dpix
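# Sanity-check sketch with a synthetic solution (illustrative numbers): if the "sky"
# solution equals the arc solution evaluated at xx + 0.37 pixels, the brute-force
# search returns an offset near -0.37, because it evaluates chebval(xx - dpix, ...).
#   xx = np.arange(2048)
#   coeff = CV.chebfit(xx, 19000.0 + 2.2 * xx, 5)
#   lam_sky = CV.chebval(xx + 0.37, coeff)
#   find_pixel_offset(lam_sky, coeff, [20000.0, 23000.0])   # ~ -0.37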
def apply_lambda_sky_and_arc(maskname, bandname, skynames, arcnames, LROIs,
options, longslit=None, smooth=True, neon=True, extension=None, short_exp = False):
global lams, sigs
skynames = IO.list_file_to_strings(skynames)
arcnames = IO.list_file_to_strings(arcnames)
skyname = filelist_to_wavename(skynames, bandname, maskname,
options).rstrip(".fits")
arcname = filelist_to_wavename(arcnames, bandname, maskname,
options).rstrip(".fits")
info(str(skyname))
info(str(arcname))
drop, data = IO.readfits(skyname+'.fits', use_bpm=True)
header, drop, bs = IO.readmosfits(skynames[0], options, extension=extension)
skydata = data.filled(0)
drop, data = IO.readfits(arcname+'.fits', use_bpm=True)
header, drop, bs = IO.readmosfits(arcnames[0], options, extension=extension)
arcdata = data.filled(0)
slitedges, edgeinfo = IO.load_edges(maskname, bandname, options)
SkyL = IO.load_lambdadata(skyname, maskname, bandname, options)
ArcL = IO.load_lambdadata(arcname, maskname, bandname, options)
bmap = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = bmap[bandname]
skylines = pick_linelist(header, short_exp = short_exp)
arclines = pick_linelist(header, neon=neon)
# write lambda
lams = np.zeros((2048, 2048), dtype=np.float32)
sigs = np.zeros((2048, 2048), dtype=np.float64)
xx = np.arange(2048)
xsamp = np.array(np.linspace(0, 2047, 10), dtype=int)
xypairs = []
if len(SkyL) != len(ArcL):
error("Number of lines in Sky file not the same as those in Arc file")
raise Exception("Number of lines in Sky file not the same as those in Arc file")
fitpix = np.arange(0,2048,100)
solutions = []
for i in range(len(SkyL)):
slp = SkyL[i]["2d"]["positions"].astype(np.int)
slc = SkyL[i]["2d"]["coeffs"]
slm = SkyL[i]["2d"]["lambdaMAD"]
alp = ArcL[i]["2d"]["positions"].astype(np.int)
alc = ArcL[i]["2d"]["coeffs"]
alm = ArcL[i]["2d"]["lambdaMAD"]
info("2d wavelengths: Slit %i/%i" % (i+1, len(SkyL)))
prev = 0
dpixels = []
for j in range(len(slp)):
if (slm[j] < 0.2) and (alm[j] < 0.2):
coeff_sky = slc[j]
coeff_arc = alc[j]
lam_sky = CV.chebval(xx, coeff_sky)
lam_arc = CV.chebval(xx, coeff_arc)
# minimize pixel solution
best_dpix = find_pixel_offset(lam_sky, coeff_arc, LROIs[i])
dpixels.append(best_dpix)
# Refit the chebyshev
lambdas = CV.chebval(fitpix - best_dpix, coeff_arc)
arc_positions = lambdas > np.mean(LROIs[i])
sky_positions = lambdas <= np.mean(LROIs[i])
to_fit = CV.chebval(fitpix[sky_positions], coeff_sky)
to_fit = np.append(to_fit,
CV.chebval(fitpix[arc_positions], coeff_arc))
coeffs = CV.chebfit(fitpix, to_fit, options["chebyshev-degree"])
prev = lams[slp[j],:] = CV.chebval(xx, coeffs)
prevcoeff = coeffs
SkyL[i]["2d"]["coeffs"][j,:] = coeffs
else:
lams[slp[j],:] = prev
info("Shifted arc by an average of %1.2f pixels" % (np.mean(dpixels)))
if np.isfinite(np.mean(dpixels)):
if smooth == True:
xr = np.arange(len(slp))
for i in range(lams.shape[1]):
ff = np.poly1d(Fit.polyfit_clip(xr, lams[slp, i], 3))
d = lams[slp,i] - ff(xr)
lams[slp, i] = ff(xr)
info("{0}: writing lambda".format(maskname))
### FIX FROM HERE
fn = "merged_lambda_coeffs_{0}_and_{1}".format(skyname, arcname)
np.save(fn, SkyL)
header = pf.Header()
header.set("maskname", maskname)
header.set("filter", bandname)
header.set("object", "Wavelengths {0}/{1}".format(maskname, bandname))
IO.writefits(lams, maskname, "merged_lambda_solution_{0}_and_{1}.fits".format(skyname, arcname),
options, overwrite=True, header=header)
info("{0}: rectifying".format(maskname))
dlam = np.ma.median(np.diff(lams[1024,:]))
hpp = Filters.hpp[bandname]
ll_fid = np.arange(hpp[0], hpp[1], dlam)
nspec = len(ll_fid)
rectified = np.zeros((2048, nspec), dtype=np.float32)
for i in range(2048):
ll = lams[i,:]
ss = skydata[i,:]
f = interp1d(ll, ss, bounds_error=False)
rectified[i,:] = f(ll_fid)
header.set("object", "Rectified wave FIXME")
header.set("wat0_001", "system=world")
header.set("wat1_001", "wtype=linear")
header.set("wat2_001", "wtype=linear")
header.set("dispaxis", 1)
header.set("dclog1", "Transform")
header.set("dc-flag", 0)
header.set("ctype1", "AWAV")
header.set("cunit1", "Angstrom")
header.set("crval1", ll_fid[0])
header.set("crval2", 0)
header.set("crpix1", 1)
header.set("crpix2", 1)
header.set("cdelt1", 1)
header.set("cdelt2", 1)
header.set("cname1", "angstrom")
header.set("cname2", "pixel")
header.set("cd1_1", dlam.item())
header.set("cd1_2", 0)
header.set("cd2_1", 0)
header.set("cd2_2", 1)
IO.writefits(rectified, maskname, "merged_rectified_{0}_and_{1}.fits".format(skyname, arcname),
options, overwrite=True, lossy_compress=True, header=header)
def apply_lambda_simple(maskname, bandname, wavenames, options,
longslit=None, smooth=True, neon=None, short_exp = False):
"""Convert solutions into final output products. This is the function that
should be used for now."""
wavenames = IO.list_file_to_strings(wavenames)
wavename = filelist_to_wavename(wavenames, bandname, maskname,
options).rstrip(".fits")
wavepath = filelist_to_path(wavenames, bandname, maskname,
options)
drop, data = IO.readfits(wavepath, use_bpm=True)
header, drop, bs = IO.readmosfits(wavenames[0], options)
data = data.filled(0)
slitedges, edgeinfo = IO.load_edges(maskname, bandname, options)
Ld = IO.load_lambdadata(wavename, maskname, bandname, options)
if longslit is not None and longslit['mode'] == "longslit":
info("*** LONGSLIT MODE *** Slit edges set to:")
info("Bottom: "+str(slitedges[0]["yposs_bot"][0]))
info("Top: "+str(slitedges[0]["yposs_top"][0]))
bmap = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = bmap[bandname]
lines = pick_linelist(header, neon=neon, short_exp = short_exp)
# write lambda
lams = np.zeros((2048, 2048), dtype=np.float32)
sigs = np.zeros((2048, 2048), dtype=np.float64)
xx = np.arange(2048)
xsamp = np.array(np.linspace(0, 2047, 10), dtype=int)
xypairs = []
xs = []
ys = []
zs = []
for i in range(len(Ld)):
lp = Ld[i]["2d"]["positions"].astype(np.int)
lc = Ld[i]["2d"]["coeffs"]
lm = Ld[i]["2d"]["lambdaMAD"]
info("Creating 2d wavelength map: Slit %i/%i" % (i+1, len(Ld)))
prev = 0
for j in range(len(lp)):
sigs[lp[j],:] = lm[j]
if lm[j] < 0.18:
prev = lams[lp[j],:] = CV.chebval(xx, lc[j])
prevcoeff = lc[j]
xs.extend(np.ones(len(xsamp)) * lp[j])
ys.extend(xsamp)
zs.extend(lams[lp[j], xsamp])
else:
lams[lp[j],:] = prev
if smooth == True:
xr = np.arange(len(lp))
for k in range(lams.shape[1]):
ff = np.poly1d(Fit.polyfit_clip(xr, lams[lp, k], 3))
d = lams[lp,k] - ff(xr)
lams[lp, k] = ff(xr)
# if False == True:
# xs,ys,zs = map(np.array, [xs,ys,zs])
# info("smoothing")
#
# polyx, polyy, cov = polyfit2d(np.array(zs,dtype=np.double),
# np.array(ys, dtype=np.double),
# np.array(xs, dtype=np.double),
# orderx=3,ordery=3)
#
# xx, yy = np.array(np.meshgrid(np.arange(2048), lp),
# dtype=np.double)
#
# M = lams[lp,:] = np.polyval(polyy, yy) + np.polyval(polyx, xx)
info("writing {} for {}".format("lambda_solution_{0}.fits".format(wavename), maskname))
header = pf.Header()
header.set("maskname", maskname)
header.set("filter", bandname)
header.set("object", "Wavelengths {0}/{1}".format(maskname, bandname))
wavename = wavename.rstrip(".fits")
IO.writefits(lams, maskname, "lambda_solution_{0}.fits".format(wavename),
options, overwrite=True, header=header)
info("writing {} for {}".format("sigs_solution_{0}.fits".format(wavename), maskname))
header.set("object", "Sigmas {0}/{1}".format(maskname, bandname))
IO.writefits(sigs, maskname, "sigs_solution_{0}.fits".format(wavename),
options, overwrite=True, header=header, lossy_compress=True)
info("writing {} for {}".format("rectified_{0}.fits".format(wavename), maskname))
dlam = 0
central_line = 1024
step = 0
while dlam==0:
line = central_line+(10*step)
dlam = np.ma.median(np.diff(lams[line,:]))
if dlam==0:
line = central_line-(10*step)
dlam = np.ma.median(np.diff(lams[line,:]))
step=step+1
# if a masked array, as returned by numpy 1.9, then get the internal representation
if type(dlam) == np.ma.MaskedArray:
dlam = dlam.item()
info("Non-empty line found at pixel "+str(line))
hpp = Filters.hpp[bandname]
ll_fid = np.arange(hpp[0], hpp[1], dlam)
nspec = len(ll_fid)
rectified = np.zeros((2048, nspec), dtype=np.float32)
for i in range(2048):
ll = lams[i,:]
ss = data[i,:]
f = interp1d(ll, ss, bounds_error=False)
rectified[i,:] = f(ll_fid)
header.set("object", "Rectified wave FIXME")
header.set("wat0_001", "system=world")
header.set("wat1_001", "wtype=linear")
header.set("wat2_001", "wtype=linear")
header.set("dispaxis", 1)
header.set("dclog1", "Transform")
header.set("dc-flag", 0)
header.set("ctype1", "AWAV")
header.set("cunit1", "Angstrom")
header.set("crval1", ll_fid[0])
header.set("crval2", 0)
header.set("crpix1", 1)
header.set("crpix2", 1)
header.set("cdelt1", 1)
header.set("cdelt2", 1)
header.set("cname1", "angstrom")
header.set("cname2", "pixel")
header.set("cd1_1", dlam)
header.set("cd1_2", 0)
header.set("cd2_1", 0)
header.set("cd2_2", 1)
IO.writefits(rectified, maskname, "rectified_{0}.fits".format(wavename),
options, overwrite=True, lossy_compress=True, header=header)
#
# Fitting Methods
#
# Physical models for instrument
def param_guess_functions(band):
"""Parameters determined from experimentation with cooldown 9 data"""
alpha_pixel = np.poly1d([-8.412e-16, 3.507e-12, -3.593e-9,
6.303e-9, 0.9963])
# Note that these numbers were tweaked by hand by npk on 28 apr
# they are not reliable. this function should change dramatically.
if band == 'Y' or band == 'J':
sinbeta_position = np.poly1d([0.0239, 36.2])
sinbeta_pixel = np.poly1d([-2.578e-7, 0.00054, -0.2365])
gamma_pixel = np.poly1d([1.023e-25, -4.313e-22, 7.668e-17, 6.48e-13])
elif band == 'H' or band == 'K':
sinbeta_position = np.poly1d([2.331e-2, 38.24])
sinbeta_pixel = np.poly1d([-2.664e-7, 5.534e-4, -1.992e-1])
gamma_pixel = np.poly1d([1.033e-25, -4.36e-22, 4.902e-19, -8.021e-17,
6.654e-13])
delta_pixel = np.poly1d([-1.462e-11, 6.186e-8, -5.152e-5, -0.0396,
1193]) - 50
return [alpha_pixel, sinbeta_position, sinbeta_pixel,
gamma_pixel, delta_pixel]
def dlambda_model(p):
"""Returns an approximate dlambda/dpixel """
x = 1024
order = p[4]
y = p[5]
(alpha, sinbeta, gamma, delta) = p[0:4]
sinbeta = np.radians(sinbeta)
d = 1e3/110.5 # Groove spacing in micron
pixelsize, focal_length = 18.0, 250e3 # micron
scale = pixelsize/focal_length
costerm = np.cos(scale * (y-1024))
return scale/(order/d) * sinbeta / costerm
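# Note: the parameter vector p follows the ordering returned by
# guess_wavelength_solution() below, i.e. [alpha, sinbeta, gamma, delta, order, y0, ...],
# so an approximate dispersion for a slit can be obtained with
#   dlambda_model(guess_wavelength_solution(slitno, header, bs))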
def pick_linelist(header, neon=False, argon=False, short_exp = False):
band = header["filter"]
# The following line lists are produced by ccs and can be found in the IRAF
# databases on ramekin:
# /scr2/mosfire/mosfire_sky_lines/database
if band == 'Y':
lines = np.array([
9793.6294 , 9874.84889 , 9897.54143 , 9917.43821 , 10015.6207 ,
10028.0978 , 10046.7027 , 10085.1622 , 10106.4478 , 10126.8684 ,
10174.623 , 10192.4683 , 10213.6107 , 10289.3707 , 10298.7496 ,
10312.3406 , 10350.3153 , 10375.6394 , 10399.0957 , 10421.1394 ,
10453.2888 , 10471.829 , 10512.1022 , 10527.7948 , 10575.5123 ,
10588.6942 , 10731.6768 , 10753.9758 , 10774.9474 , 10834.1592 ,
10844.6328 , 10859.5264 , 10898.7224 , 10926.3765 , 10951.2749 ,
10975.3784 , 11029.8517 , 11072.4773 , 11090.083 , 11140.9467 ,
11156.0366 , ])
if band == 'H':
lines = np.array([
14605.0225 , 14664.9975 , 14698.7767 , 14740.3346 , 14783.7537 ,
14833.029 , 14864.3219 , 14887.5334 , 14931.8767 , 15055.3754 ,
15088.2599 , 15187.1554 , 15240.922 , 15287.7652 ,
15332.3843 , 15395.3014 , 15432.1242 , 15570.0593 , 15597.6252 ,
15631.4697 , 15655.3049 , 15702.5101 , 15833.0432 , 15848.0556 ,
15869.3672 , 15972.6151 , 16030.8077 , 16079.6529 , 16128.6053 ,
16194.6497 , 16235.3623 , 16317.0572 , 16351.2684 , 16388.4977 ,
16442.2868 , 16477.849 , 16502.395 , 16553.6288 , 16610.807 ,
16692.2366 , 16708.8296 , 16732.6568 , 16840.538 , 16903.7002 ,
16955.0726 , 17008.6989 , 17078.3519 , 17123.5694 , 17210.579 ,
17248.5646 , 17282.8514 , 17330.8089 , 17386.0403 , 17427.0418 ,
17449.9205 , 17505.7497 , 17653.0464 , 17671.843 , 17698.7879 ,
17811.3826 , 17880.341 , 17993.9600 , 18067.9500 ])
if band == 'J':
# Removed: 12589.2998 12782.9052 12834.5202
lines = np.array([
11538.7582 , 11591.7013 , 11627.8446 , 11650.7735 , 11696.3379 ,
11716.2294 , 11788.0779 , 11866.4924 , 11988.5382 , 12007.0419 ,
12030.7863 , 12122.4957 , 12135.8356 , 12154.9582 , 12196.3557 ,
12229.2777 , 12257.7632 , 12286.964 , 12325.9549 , 12351.5321 ,
12400.8893 , 12423.349 , 12482.8503 , 12502.43 ,
12905.5773 , 12921.1364 , 12943.1311 ,
12985.5595 , 13021.6447 , 13052.818 , 13085.2604 , 13127.8037 ,
13156.9911 , 13210.6977 , 13236.5414 , 13301.9624 , 13324.3509 ,
13421.579])
if (band == 'K') and short_exp:
# remove: 19518.4784 , 19593.2626 , 19618.5719 ,19678.046 ,19839.7764 ,20193.1799 ,20499.237 ,21279.1406 ,21580.5093 ,21711.1235 , 21873.507 ,22460.4183 ,22690.1765 ,22985.9156,23914.55, 24041.62,22742.1907
lines = np.array([
19642.4493 ,
19701.6455 , 19771.9063 ,
20008.0235 , 20275.9409 , 20339.697 , 20412.7192 ,
20563.6072 , 20729.032 , 20860.2122 , 20909.5976 ,
21176.5323 , 21249.5368 , 21507.1875 , 21537.4185 ,
21802.2757 , 21955.6857 ,
22125.4484 , 22312.8204 , 22517.9267
])
print("using short exposure line list")
elif band == 'K':
#drop: 19751.3895, 19736.4099, 21711.1235, 22
lines = np.array([
19518.4784 , 19593.2626 , 19618.5719 , 19642.4493 , 19678.046 ,
19701.6455 , 19771.9063 , 19839.7764 ,
20008.0235 , 20193.1799 , 20275.9409 , 20339.697 , 20412.7192 ,
20499.237 , 20563.6072 , 20729.032 , 20860.2122 , 20909.5976 ,
21176.5323 , 21249.5368 , 21279.1406 , 21507.1875 , 21537.4185 ,
21580.5093 , 21711.1235 , 21802.2757 , 21873.507 , 21955.6857 ,
22125.4484 , 22312.8204 , 22460.4183 , 22517.9267 , 22690.1765 ,
22742.1907 , 22985.9156, 23914.55, 24041.62])
if neon:
# http://www2.keck.hawaii.edu/inst/mosfire/data/MosfireArcs/mosfire_Ne_vac.list
# Trimmed using PDF of id'd lines
info("Picking Neon's arc line list")
if band == 'Y':
lines = np.array([
9668.071,
10298.238,
10565.303,
10801.001,
10847.448,
11146.072,
11180.585,
11393.552,
11412.257])
if band == 'J':
lines = np.array([
11393.552,
11412.257,
11525.900,
11539.503,
11617.260,
11792.271,
11988.194,
12069.636,
12462.799,
12692.674,
12915.546])
if band == 'H':
lines = np.array([
14933.886,
14990.415,
15144.236,
15195.083,
15234.877,
15352.384,
15411.803,
15608.478,
16027.147,
16272.797,
16049.737,
16479.254,
16793.378,
16866.255,
17166.622])
if band == 'K':
lines = np.array([
#19579.094 ,
19582.455 ,
20355.771 ,
21047.013 ,
21714.039 ,
22434.265 ,
22536.528 ,
22667.971 ,
23106.784 ,
23379.343 ,
23571.764 ,
23642.934 ,
#23918.541 ,
24168.025 ,
#24256.224 ,
#24371.661 ,
#24378.260 ,
#24390.011 ,
#24454.531 ,
#24459.775 ,
#24466.068 ,
#24471.606 ,
])
if argon:
info("Picking Argon's arc line list")
if band == 'Y':
lines = np.array([
9660.43,
9787.18,
10054.81,
10472.92,
10480.90,
10676.49,
10684.70,
10883.94,
11081.90, ])
if band == 'J':
lines = np.array([
11491.25,
11671.90,
11722.70,
11946.55,
12029.94,
12115.64,
12143.06,
12346.77,
12406.22,
12442.73,
12491.08,
12736.90,
12806.24,
12960.20,
13011.82,
13217.61,
13276.27,
13316.85,
13370.77,
13410.26,
13507.88,
13626.38 ])
if band == 'H':
lines = np.array([
14654.35,
14743.17,
15050.62,
15176.84,
15306.07,
15333.54,
15406.85,
15904.03,
15993.86,
16184.44,
16441.44,
16524.38,
16744.65,
16945.21,
17449.67,
17919.61])
if band == 'K':
lines = np.array([
19822.91,
19971.18,
20322.56,
20574.43,
20621.86,
20739.22,
20816.72,
20991.84,
21338.71,
21540.09,
22045.58,
22083.21,
23139.52,
23851.54,])
lines = np.array(lines)
return np.sort(lines)
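# Usage sketch: pick_linelist() only needs a FITS-header-like mapping with a "filter"
# key, so for example
#   pick_linelist({"filter": "K"})                  # night-sky lines for K
#   pick_linelist({"filter": "K"}, neon=True)       # Ne arc lines for K
#   pick_linelist({"filter": "K"}, short_exp=True)  # trimmed list for short exposures
# The return value is a sorted numpy array of wavelengths in Angstrom.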
def guess_wavelength_solution(slitno, header, bs):
"""Given a slit number guess the coefficient values
return [order, y0, alpha, sinbeta, gamma, delta]
"""
band = header['filter'].rstrip()
bmap = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = bmap[band]
y0 = bs.csu_slit_to_pixel(slitno)
csupos_mm = bs.csu_slit_center(slitno)
[alpha_pixel, sinbeta_position, sinbeta_pixel, gamma_pixel,
delta_pixel] = param_guess_functions(band)
retv = [alpha_pixel(y0),
sinbeta_position(csupos_mm) + sinbeta_pixel(y0),
gamma_pixel(y0),
delta_pixel(y0),
order,
y0,
csupos_mm]
return retv
def refine_wavelength_guess(wave,spec,linelist):
"""Do a cross correlation with the sky lines to get a better
guess of the wavelength solution.
INPUTS:
---------
wave - wavelength array (needs to be same length as spec)
spec - spectrum flux corresponding to those wavelengths
linelist - a list of reference line centroids
HISTORY:
--------
2013-06-27 - T. Do
"""
# find what is the average peak height to construct the reference
# spectrum template
peaks = signal.find_peaks_cwt(spec,np.array([2.0,4.0]),noise_perc=5.0,min_snr =1.5)
avePeak = np.mean(spec[peaks])
refSpec = np.zeros(len(spec))
for i in np.arange(len(linelist)):
refSpec = refSpec + Fit.gaussian([avePeak,linelist[i],2.0,0,0],wave)
corr = signal.correlate(spec,refSpec,mode='same')
lags = np.arange(len(spec)) - len(spec)//2
# peak velocity corresponding to the pixel peak
peakInd = np.argmax(corr)
peakLag = lags[peakInd]
# return the number of pixels that need to be shifted
return -peakLag
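# Note: the returned value is a pixel shift to be added to the pixel grid before
# re-evaluating the model, mirroring its use in InteractiveSolution.setup() below:
#   refShift = refine_wavelength_guess(inputWave, spec, linelist)
#   ll = wavelength_model(parguess, tx + refShift)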
def plot_mask_solution_ds9(fname, maskname, options):
"""makes a ds9 region file guessing the wavelength solution"""
(header, data, bs) = IO.readmosfits(fname, options)
linelist = pick_linelist(header)
ds9 = """# Region file format: DS9 version 4.1
global color=red
"""
pix = np.arange(2048)
colors = ["red", "blue"]
cidx = 0
for i in range(1,len(bs.ssl)+1):
slits = bs.scislit_to_csuslit(i)
info("Guessing: "+str(slits))
cidx = (cidx + 1) % 2
color = colors[cidx]
for slitno in slits:
guess = guess_wavelength_solution(slitno, header, bs)
ll = wavelength_model(guess, pix)
if bs.is_alignment_slitno(slitno): color = 'green'
for line in linelist:
x = np.argmin(np.abs(ll - line))
ds9 += "circle(%f, %f, 1) # color=%s text={}\n" % (x, guess[5],
color)
path = './'
fname = fname.rstrip(".fits")
fn = os.path.join(path, maskname, ("guess_waves_%s.reg" % fname))
try: os.remove(fn)
except: pass
try:
f = open(fn, 'w')
f.write(ds9)
f.close()
except:
error("Could not write %s" % fn)
def estimate_half_power_points(slitno, header, bs):
"""This helper function is used to determine the filter half-power points.
This function is primarily used by the flat-field code to determine the
on order regions of an image. """
band = header['filter'].rstrip()
parguess = guess_wavelength_solution(slitno, header, bs)
pix = np.arange(2048.)
ll = wavelength_model(parguess, pix)
hpp = Filters.hpp[band]
return [ np.argmin(np.abs(ll-hpp[0])), np.argmin(np.abs(ll-hpp[1])) ]
def xcor_known_lines(lines, ll, spec, spec0, options):
"""
lines[N]: list of lines in wavelength units
ll[2048]: lambda vector
spec[2048]: spectrum vector (as function of lambda)
options: wavelength options
"""
inf = np.inf
dxs = []
sigs = []
pix = np.arange(2048.)
for lam in lines:
f = options["fractional-wavelength-search"]
roi = np.where((f*lam < ll) & (ll < lam/f))[0]
if not roi.any():
dxs.append(inf)
sigs.append(inf)
continue
lags = np.arange(-len(roi)//2, len(roi)//2)
cors = Fit.xcor(spec[roi], spec0[roi], lags)
fit = Fit.mpfitpeak(lags, cors)
if (fit.perror is None) or (fit.status < 0):
dxs.append(inf)
sigs.append(inf)
continue
dxs.append(fit.params[1])
sigs.append(fit.params[2])
return list(map(np.array, [dxs, sigs]))
def find_known_lines(lines, ll, spec, options):
"""
lines[N]: list of lines in wavelength units
ll[2048]: lambda vector
spec[2048]: spectrum vector (as function of lambda)
options: wavelength options
"""
inf = np.inf
xs = []
sxs = []
sigmas = []
pix = np.arange(len(spec))
for lam in lines:
f = options["fractional-wavelength-search"]
roi = (f*lam < ll) & (ll < lam/f)
if not roi.any():
xs.append(0.0)
sxs.append(inf)
continue
istd = 1/np.sqrt(np.abs(spec[roi].data))
lsf = Fit.mpfitpeak(pix[roi], spec[roi].data,
error=istd)
if (lsf.perror is None) or (lsf.status < 0):
xs.append(0.0)
sxs.append(inf)
continue
mnpix = np.min(pix[roi])
mxpix = np.max(pix[roi])
# reject fits whose centroid falls too close to the edge of the search window
if not (mnpix + 4) < lsf.params[1] < (mxpix - 4):
xs.append(0.)
sxs.append(inf)
continue
if mnpix < 7:
xs.append(0.0)
sxs.append(inf)
continue
if mxpix > 2040:
xs.append(0.0)
sxs.append(inf)
continue
xs.append(lsf.params[1])
sxs.append(lsf.perror[1])
sigmas.append(lsf.params[2])
return list(map(np.array, [xs, sxs, sigmas]))
def fit_chebyshev_to_lines(xs, sxs, lines, options):
"""Fit a chebyshev function to the best fit determined lines.
Note the best fits may fail and the first part of this function culls
bad fits, while the second part of the function has all the chebyshev
action. For reference the numpy.polynomial.chebyshev package is imported
as CV """
ok = np.isfinite(sxs)
L = len(xs[ok])
badfit = np.zeros(options["chebyshev-degree"]+1)
baddelt = np.ones(L) * 9999.0
if L < 6:
return [baddelt, badfit, lines[ok]]
if np.median(lines) < 1000:
error("Units fed to this function are likely in micron but "
"should be in angstrom")
raise Exception("Units fed to this function are likely in micron but "
"should be in angstrom")
cfit = CV.chebfit(xs[ok], lines[ok], options["chebyshev-degree"])
delt = CV.chebval(xs[ok], cfit) - lines[ok]
if cfit is None:
return [baddelt, badfit, lines[ok]]
return [np.array(delt), np.array(cfit), np.array(lines[ok])]
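# Sketch with synthetic line centroids (illustrative numbers; at least 6 finite points
# are required, otherwise the 9999-Angstrom "bad fit" sentinel is returned):
#   xs = np.array([110.0, 420.0, 760.0, 1100.0, 1450.0, 1780.0, 2000.0])
#   sxs = np.full(7, 0.05)
#   lines = 19500.0 + 2.2 * xs                # fake linear dispersion, in Angstrom
#   delt, cfit, used = fit_chebyshev_to_lines(xs, sxs, lines, Options.wavelength)
#   lam = CV.chebval(np.arange(2048), cfit)   # wavelength [Ang] for every pixel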
def fit_model_to_lines(xs, sxs, lines, parguess, options, fixed):
ok = np.isfinite(sxs)
if len(np.where(ok)[0]) < 3:
return [[np.inf], parguess, None]
slambda = sxs * dlambda_model(parguess)
parinfo = [
{'fixed': 0, 'value': parguess[0], 'parname': 'alpha', 'step': 1e-5,
'limited': [0,0], 'limits': [0,0]},
{'fixed': 0, 'value': parguess[1], 'parname': 'sinbeta',
'step': 1e-7, 'limited': [0,0], 'limits': [0, 0]},
{'fixed': fixed, 'value': parguess[2], 'parname': 'gamma','step': 1e-12,
'limited': [1,1], 'limits': [-50e-13, 50e-13]},
{'fixed': fixed, 'value': parguess[3], 'parname': 'delta', 'step': 1,
'limited': [1,1], 'limits': [0, 2048]},
{'fixed': 1, 'value': parguess[4], 'parname': 'order',
'limited': [0,0], 'limits': [3, 7]},
{'fixed': 1, 'value': parguess[5], 'parname': 'Y',
'limited': [0,0], 'limits': [0, 2048]}
]
merit_function = Fit.mpfit_residuals(wavelength_model)
lsf = Fit.mpfit_do(merit_function, xs[ok], lines[ok],
parinfo, error=slambda[ok])
delt = np.abs(wavelength_model(lsf.params, xs[ok]) - lines[ok])
xsOK = xs[ok]
linesOK = lines[ok]
return [ delt, lsf.params, lsf.perror]
def guesslims(spec):
"""Guess the spectral limits"""
f = 1.1
s = spec.copy()
s.sort()
return [-500, s[-10]*f]
def polyfit_err(x,y,order):
coef = np.polyfit(x,y,order)
fun = np.poly1d(coef)
return fun, coef, np.std(y - fun(x))
class InteractiveSolution:
header = None
data = None
bs = None
parguess = None
linelist0 = None
foundlines = None
options = None
slitno = None # the science slit number.
spec = None
good_solution = False
done = False
starting_pos = None # This overrides the extract position for longslits
ll = None
pix = None
xlim = None
ylim = None
MAD = None
STD = None
first_time = True
def __init__(self, fig, mfits, linelist, options, slitno, outfilename,
solutions=None, noninteractive=False, starting_pos=None):
self.header = mfits[0]
self.data = mfits[1]
self.bs = mfits[2]
self.options = options
self.linelist0 = linelist
self.slitno = slitno
self.fig = fig
self.ax1=pl.subplot(2,1,1)
self.ax2=pl.subplot(2,1,2)
self.done=False
self.outfilename = outfilename
self.starting_pos = starting_pos
self.noninteractive = noninteractive
self.pix = np.arange(2048)
band = self.header["filter"].rstrip()
self.xrng = Filters.hpp[band][:]
self.band = band
self.xrng[0] *= 0.99
self.xrng[1] /= 0.99
self.sigma_clip = False
self.xlim = self.xrng
if solutions is None:
self.solutions = list(range(len(self.bs.ssl)))
else:
self.solutions = solutions
if self.noninteractive:
self.setup()
self.fit_event(0,0)
#self.nextobject(0,0) ### the call to next object is built-in in fit_event
while self.done is False:
self.fit_event(0,0)
self.nextobject(0,0)
else:
# follow line prevents window from going full screen when the
# 'f'it button is pressed.
pl.rcParams['keymap.fullscreen'] = ''
self.cid = self.fig.canvas.mpl_connect('key_press_event', self)
self.setup()
self.fit_event(0,0)
def setup(self):
csuslits = self.bs.scislit_to_csuslit(self.slitno)
try:
l = len(csuslits)
if l > 1:
csuslit = csuslits[l//2]
else:
csuslit = csuslits[0]
except:
csuslit = csuslits
# info(str(csuslits)+" "+str(csuslit))
info('CSU slits {} acting as slit number {}'.format(str(csuslits), str(csuslit)))
self.linelist = self.linelist0
if self.starting_pos is None:
self.extract_pos = self.bs.science_slit_to_pixel(self.slitno)
else:
info("LONGSLIT mode: forced longslit center line")
'''This is used in longslits to handle a forced start position'''
self.extract_pos = self.starting_pos
info("Extracting at %i " % self.extract_pos)
S = self.solutions[self.slitno-1]
if type(S) is not int: # previously setup
self.MAD = S["MAD"]
self.STD = S["STD"]
self.linelist = S["linelist"]
self.foundlines = S["foundlines"]
self.foundlinesig = S["foundlinesig"]
self.extract_pos = S["extract_pos"]
self.cfit = S["sol_1d"][1]
else:
self.MAD = self.STD = self.foundlines = self.linesig = None
tx = np.arange(0,2048,100)
parguess = guess_wavelength_solution(csuslit, self.header,
self.bs)
ll = wavelength_model(parguess, tx)
refineGuess= True
if refineGuess:
# do an inital fit
cfit = CV.chebfit(tx, ll, self.options["chebyshev-degree"])
inputWave = CV.chebval(self.pix, cfit)
self.spec = \
np.ma.mean(self.data[self.extract_pos-1:self.extract_pos+1, :],
axis=0) # axis = 0 is spatial direction
# refine the wavelength guess using the reference lines
refShift = refine_wavelength_guess(inputWave,self.spec,self.linelist)
# add the shift to the wavelength solution
ll = wavelength_model(parguess, tx+refShift)
self.cfit = CV.chebfit(tx, ll, self.options["chebyshev-degree"])
# I don't understand why there is this offset added, but I am removing it
# because the refineGuess method results in more accurate positioning.
# if self.band != 'J':
# self.cfit[0] -= 16.0
self.spec = \
np.ma.mean(self.data[self.extract_pos-1:self.extract_pos+1, :],
axis=0) # axis = 0 is spatial direction
self.ll = CV.chebval(self.pix, self.cfit)
if self.noninteractive:
pass
else:
info("Launching graphics display. ")
self.redraw()
def toggle_noninteractive(self,x,y):
info("############ NON INTERACTIVE MODE ENABLED ###########")
info("# From now on, the fit will proceed automatically #")
info("#####################################################")
self.noninteractive = True
self.quit(0,0)
self.nextobject(0,0)
def draw_found_lines(self):
self.ax1.grid(True)
ymax = self.ax1.get_ylim()[1]
if self.foundlines is not None:
foundlams = CV.chebval(self.foundlines, self.cfit)
ok = np.isfinite(self.foundlinesig)
for i in range(len(self.linelist)):
if not ok[i]: continue
D = (foundlams[i] - self.linelist[i])
self.ax1.axvline(foundlams[i], color='orange', ymax=.75, ymin=.25,
linewidth=1.5)
self.ax1.text(foundlams[i], 1500, "%1.2f" % D, rotation='vertical',
size=10)
self.ax2.set_xlim(self.xlim)
self.ax2.grid(True)
#pl.axhline(0.1)
#pl.axhline(-0.1)
self.ax2.axhline(self.STD)
self.ax2.axhline(-1*self.STD)
if self.STD < 0.1:
fmt = 'go'
else:
fmt = 'bo'
self.ax2.plot(self.linelist[ok], (foundlams[ok] - self.linelist[ok]), fmt)
self.ax2.set_xlim(self.xlim)
def draw_done(self):
if self.done is False:
return
else:
mid = np.mean(self.xlim)*.99
self.ax1.text(mid, 0, 'Done!', size=32, color='red')
def draw_vertical_line_marks(self):
ymax = self.ax1.get_ylim()[1]
i = 0
for line in self.linelist:
self.ax1.axvline(line, color='red', linewidth=.5)
self.ax1.text(line, ymax*.75, "%5.1f" % (line),
rotation='vertical', color='black')
i = i+1
fwl = self.options['fractional-wavelength-search']
self.ax1.plot([line*fwl,line/fwl], [0,0], linewidth=2)
def redraw(self):
pl.ion()
pl.clf()
self.fig = self.fig
self.ax1=pl.subplot(2,1,1)
self.ax2=pl.subplot(2,1,2)
pl.subplots_adjust(left=.1,right=.95,bottom=.1,top=.90)
self.ax1.plot(self.ll, self.spec, linestyle='steps-mid')
if self.MAD is None:
pl.title("[%i] Press 'z' to zoom, 'x' to unzoom, 'c' to shift, "
"'f' to fit, 'k' to toggle sigma clipping. 'h' for help" % self.slitno)
else:
name = self.bs.ssl[self.slitno-1]["Target_Name"]
pl.title(u"[%i,%s, p%i] Best fit STD: %0.2f $\AA$, MAD: %0.2f $\AA$: " \
% (self.slitno, name, self.extract_pos,self.STD, self.MAD))
pl.ioff()
self.draw_vertical_line_marks()
self.draw_found_lines()
self.fig.show()
pl.ion()
ymax = self.ax1.get_ylim()[1]
self.ax1.set_xlim(self.xlim)
if self.band == 'Y':
self.ax1.set_ylim([-100, 1000])
else:
self.ax1.set_ylim([-1000, ymax*.8])
if np.max(self.spec) < 200:
self.ax1.set_ylim([-100,500])
self.draw_done()
def shift(self, x, y):
"""Shift the observed spectrum"""
theline = np.argmin(np.abs(x - self.linelist))
delt = x - self.linelist[theline]
self.ll -= delt
self.redraw()
def drop_point(self, x, y):
"""Drop point nearest in x from set"""
theline = np.argmin(np.abs(x - self.linelist))
self.linelist = np.delete(self.linelist, theline)
if self.foundlines is not None:
self.foundlines = np.delete(self.foundlines, theline)
self.foundlinesig = np.delete(self.foundlinesig, theline)
self.redraw()
def unzoom(self, x, y):
"""Show the full spectrum"""
self.xlim = self.xrng
pl.ion()
self.ax1.set_xlim(self.xlim)
self.ax2.set_xlim(self.xlim)
def zoom(self, x, y):
"""Zoom/pan the view"""
self.xlim = [x*.988,x/.988]
pl.ion()
self.ax1.set_xlim(self.xlim)
self.ax2.set_xlim(self.xlim)
def fastforward(self, x, y):
"""Fast forward to next uncalib obj """
for i in range(self.slitno+1, len(self.solutions)):
if type(self.solutions[i]) is int:
self.slitno = i-1
self.setup()
break
def nextobject(self, x, y):
"""Go to the next object"""
self.slitno += 1
self.done = False
if self.slitno > len(self.bs.ssl):
self.done = True
self.slitno -= 1
if self.noninteractive is False:
self.draw_done()
info("Saving to: "+str(self.outfilename))
np.save(self.outfilename, np.array(self.solutions))
if self.done is False:
self.setup()
self.fit_event(0,0)
def prevobject(self, x, y):
"""Go to the previous object"""
self.slitno -= 1
self.done = False
if self.slitno < 1:
warning("first limit")
self.slitno = 1
self.setup()
def quit(self, x, y):
"""Quit and save the results """
info("Closing figure")
self.fig.canvas.mpl_disconnect(self.cid)
pl.close(self.fig)
def reset(self, x, y):
"""Reset the fitting performed on this object """
self.MAD = None
self.solutions[self.slitno-1] = self.slitno
self.setup()
def savefig(self, x, y):
"""Save the figure to disk"""
pass
def toggle_sigma_clip(self,x,y):
if self.sigma_clip is True:
self.sigma_clip = False
info("Sigma clipping disabled")
else:
self.sigma_clip = True
info("Sigma clipping enabled")
self.fit_event(0,0)
def fit_event(self, x, y):
"""Fit Chebyshev polynomial to predicted line locations """
[xs, sxs, sigmas] = find_known_lines(self.linelist, self.ll,
self.spec, self.options)
self.foundlines = xs
self.foundlinesig = sxs
mask = (np.isfinite(sxs))
local_linelist=self.linelist[mask]
xs = xs[mask]
sxs = sxs[mask]
[deltas, cfit, perror] = fit_chebyshev_to_lines(xs, sxs,
local_linelist, self.options)
self.cfit = cfit
self.ll = CV.chebval(self.pix, self.cfit)
# Calculate current std error
error = np.std(deltas[np.isfinite(deltas)])
if self.sigma_clip is True or self.noninteractive:
# prepare a sigma tolerance (reject values of deltas > tolerance * sigma)
tolerance = 3
# if the std error is > 0.10, iteratively reject lines
while error>0.10:
# info("#####################################################")
warning("Large error detected. Iterating with sigma clipping")
warning("Current error is "+str(error))
warning("Current tolerance is "+str(tolerance)+" sigmas")
warning("Number of lines used for fit: "+str(len(xs)))
warning("Filtering with rms = "+str(np.std(deltas[np.isfinite(deltas)])))
mask = (abs(deltas)<tolerance*np.std(deltas[np.isfinite(deltas)]))
warning("Number of rejected lines: "+str(len(xs)-len(xs[mask])))
local_linelist = local_linelist[mask]
xs=xs[mask]
sxs=sxs[mask]
info("Fitting again...")
[deltas, cfit, perror] = fit_chebyshev_to_lines(xs, sxs,
local_linelist, self.options)
error = np.std(deltas[np.isfinite(deltas)])
if error<=0.10:
info("The error is now {}. The error is acceptable, continuing...".format(error))
else:
info("The error is now {}".format(error))
# info("#####################################################")
self.cfit = cfit
self.ll = CV.chebval(self.pix, self.cfit)
tolerance = tolerance - 0.2
self.foundlines = xs
self.foundlinesig = sxs
self.linelist = local_linelist
ok = np.isfinite(deltas)
self.STD = np.std(deltas[ok])
self.MAD = np.median(np.abs(deltas[ok]))
debug("STD: %1.2f MAD: %1.2f" % (self.STD, self.MAD))
debug(str(self.cfit))
self.solutions[self.slitno-1] = {"linelist": self.linelist, "MAD":
self.MAD, "foundlines": self.foundlines, "foundlinesig":
self.foundlinesig, "sol_1d": [deltas, cfit, sigmas], "STD":
self.STD, "slitno": self.slitno, "extract_pos":
self.extract_pos}
info('Stored slit number: {}'.format(str(self.solutions[self.slitno-1]['slitno'])))
if self.noninteractive is False:
self.redraw()
else:
self.nextobject(0,0)
def __call__(self, event):
kp = event.key
x = event.xdata
y = event.ydata
info( str(kp)+" "+str(x)+" "+str(y))
actions_mouseless = {".": self.fastforward, "n": self.nextobject, "p":
self.prevobject, "q": self.quit, "r": self.reset, "f":
self.fit_event, "k": self.toggle_sigma_clip, "\\": self.fit_event, "b": self.toggle_noninteractive}
actions = { "c": self.shift, "d": self.drop_point,
"z": self.zoom, "x": self.unzoom, "s": self.savefig}
if (kp == 'h') or (kp == '?'):
info("Commands Desc")
for key, value in list(actions.items()):
info("%8s %s" % (key, value.__doc__))
for key, value in list(actions_mouseless.items()):
info("%8s %s" % (key, value.__doc__))
if kp in actions_mouseless:
actions_mouseless[kp](x, y)
if x is None: return
if y is None: return
if kp in actions:
actions[kp](x, y)
def fit_wavelength_solution(data, parguess, lines, options,
slitno, search_num=145, fixed=False):
"""Tweaks the guessed parameter values and provides 1d lambda solution
"""
pix = np.arange(2048.)
MAD = np.inf
y0 = parguess[5]
spec = np.ma.median(data[y0-1:y0+1, :],
axis=0) # axis = 0 is spatial direction
d = 0.1
dsinbetas = np.sort(np.abs(np.linspace(-d/2., d/2., search_num)))
sinbetadirection = 1.0
iteration = 0
DRAW = False
if DRAW:
pl.ion()
pl.figure(2, figsize=(16,5))
pl.xlim([2.03,2.3])
#print "iter dsb MAD"
for dsinbeta in dsinbetas:
dsinbeta *= sinbetadirection
sinbetadirection *= -1
pars = parguess
pars[1] = parguess[1] + dsinbeta
ll = wavelength_model(parguess, pix)
[xs, sxs, sigmas] = find_known_lines(lines, ll, spec, options)
[deltas, params, perror] = fit_model_to_lines(xs, sxs, lines,
pars, options, fixed)
if DRAW:
pl.figure(2)
pl.xlim([1.94,2.1])
ll2 = wavelength_model(params, pix)
pl.plot(ll2, spec)
for line in lines:
pl.axvline(line ,color='red')
pl.draw()
MAD = np.ma.median(deltas)
iteration += 1
#print "%2.2i] %3.0i %1.4f %1.4f" % (slitno, iteration, dsinbeta, MAD)
if MAD > MADLIMIT:
continue
else:
#print "%i] found: %3i %+1.5f %3.6f" % (slitno, iteration, dsinbeta, MAD)
break
if MAD <= MADLIMIT:
#print("%3i: %3.5f %4.3f %3.3e %4.1f %1.4f" % (slitno, params[0], params[1],
#params[2], params[3], MAD))
return [deltas, params, perror, sigmas]
else:
warning("%3i: Could not find parameters" % slitano)
return [[], parguess, None, []]
def construct_model(slitno):
"""Given a matrix of Chebyshev polynomials describing the wavelength
solution per pixel, return a 2-d function returning the wavelength
solution.
For a set of Chebyshev coefficients c_1 to c_6 there is a strong
correlation between c_n and c_1 in pixel space. This correlation is
used to produce the 2-d wavelength solution.
"""
global coeffs, data, linelist, options
info("Constructing model on %i" % slitno)
pix = np.arange(2048)
cfits = coeffs[slitno-1]['2d']['coeffs']
delts = coeffs[slitno-1]['2d']['delts']
sds = coeffs[slitno-1]['2d']['lambdaRMS']
mads = coeffs[slitno-1]['2d']['lambdaMAD']
positions= coeffs[slitno-1]['2d']['positions']
ok = (sds < 0.2) & (mads < 0.1)
c0coeff = Fit.polyfit_sigclip(positions[ok], cfits[ok,0], 1, nmad=3)
c0fun = np.poly1d(c0coeff)
cfit_coeffs = [c0coeff]
cfit_funs = [c0fun]
for i in range(1, cfits.shape[1]):
        order = 1  # a linear model is used for every higher-order coefficient
ci_coeff = Fit.polyfit_sigclip(cfits[ok,0], cfits[ok,i], order, nmad=3)
cfit_coeffs.append(ci_coeff)
ci_fun = np.poly1d(ci_coeff)
cfit_funs.append(ci_fun)
lambdaRMS = []
lambdaMAD = []
cpolys = []
# Check the fits now
if True:
for i in range(len(positions)):
pos = positions[i]
c0 = c0fun(pos)
cs = [c0]
cs.extend([f(c0) for f in cfit_funs[1:]])
ll_now = CV.chebval(pix, cs)
spec_here = np.ma.median(data[pos-1:pos+1,:], axis=0)
[xs, sxs, sigmas] = find_known_lines(linelist,
ll_now, spec_here, options)
[delt, cfit, lines] = fit_chebyshev_to_lines(xs, sxs,
linelist, options)
rms = np.std(delt)
rmsR = np.median(np.abs(lines/delt))
if rms > .2:
pre = bcolors.FAIL
elif rms > .1:
pre = bcolors.OKBLUE
else:
pre = bcolors.ENDC
info(pre + "2d model S%2.2i p%4.4i: residual %1.3f Angstrom " \
"RMS / %3.1e lam/dlam / %1.3f Angstrom MAD" % (slitno, pos,
rms, rmsR, np.median(np.abs(delt))) + bcolors.ENDC)
lambdaRMS.append(np.std(delt))
lambdaMAD.append(np.median(np.abs(delt)))
cpolys.append(cs)
"""The return function takes a pixel position and returns wavelength
in angstrom"""
return {"functions": np.array(cfit_coeffs), "cpolys": np.array(cpolys),
"RMS": np.array(lambdaRMS), "MAD": np.array(lambdaMAD),
"positions": positions[ok]}
else:
info("No fit stats")
return {"functions": np.array(cfit_coeffs), "cpolys": [], "RMS":
np.zeros(len(positions[ok])), "MAD":
np.zeros(len(positions[ok])), "positions": positions[ok]}
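# Illustrative sketch (not part of the original pipeline): the regression step
# behind construct_model() in isolation.  Each higher-order Chebyshev
# coefficient c_i is modeled as a low-order polynomial in the zeroth
# coefficient c_0, which itself is modeled as a function of slit position.
# `cfits` and `positions` are assumed to be the arrays stored under
# coeffs[slit]['2d']; np.polyfit stands in for the sigma-clipped fit used above.
def example_cn_vs_c0_model(cfits, positions, order=1):
    """Return a list of numpy.poly1d models: [c0(position), c1(c0), c2(c0), ...]."""
    c0_fun = np.poly1d(np.polyfit(positions, cfits[:, 0], order))
    funs = [c0_fun]
    for i in range(1, cfits.shape[1]):
        funs.append(np.poly1d(np.polyfit(cfits[:, 0], cfits[:, i], order)))
    return funs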
#
# Two dimensional wavelength fitting
#
def fit_outwards_refit(data, bs, sol_1d, lines, options, start, bottom, top,
slitno, linelist2=None, data2=None, sol_1d2=None):
'''Fit Chebyshev polynomials across a slit bounded by bottom to top.
Args:
data: 2k x 2k data frame
bs: The barset
sol_1d: The guess solution taken at the 'start' position
lines: The list of lines (wavelength in Angstrom)
options: Options dictionary
start: The start position (should be where the interactive wavelength
            fit occurred).
bottom/top: The bottom/top of the slit in pixels
Optional Arguments:
        linelist2 - second line list, used when both the neon and argon
            lines will be fit simultaneously.
        data2 - the spectral data set that goes along with the argon
            line list when argon and neon are used simultaneously.
Returns:
        A dictionary of arrays (one entry per fitted spatial row) containing:
            'coeffs': Chebyshev coefficients
            'delts': The offsets (Angstrom) of the measured line positions
                from the catalog positions
            'lambdaRMS': The standard deviation of delts
            'lambdaMAD': The MAD of delts
            'positions': Row (spatial) positions in pixel units
        The wavelength solution along the i-th fitted row is computed by
            sol = fit_outwards_refit(...)
            wave_vector = CV.chebval(np.arange(2048), sol['coeffs'][i])
        where wave_vector holds the wavelength in Angstrom of that row.
Modification history:
2014 June 17 MK - Added code to handle two line lists simultaneously
This will allow observers to fit both the Argon
and the Neon lines. Added optional parameters data2
linelist2.
'''
bottom = int(bottom)
top = int(top)
lags = np.arange(-50,50)
pix = np.arange(2048.)
linelist = lines
def fit_parameters(yhere):
"""
Return chebyshev fit to a pixel column
        2014 June 17 MK - Added a second set of variables to indicate that
            there are two sets of lines we want to fit. This is used
            for the arc line data. We want to fit both Ne and
            Argon simultaneously.
"""
cfit = sol_1d[1]
spec_here = np.ma.median(data[int(yhere)-2:int(yhere)+2, :], axis=0)
shift = Fit.xcor_peak(spec_here, spec0, lags)
ll_here = CV.chebval(pix - shift, cfit)
[xs, sxs, sigmas] = find_known_lines(linelist,
ll_here, spec_here, options)
if data2 is not None:
cfit2 = sol_1d2[1]
            spec_here2 = np.ma.median(data2[int(yhere)-2:int(yhere)+2, :], axis=0)
shift2 = Fit.xcor_peak(spec_here2, spec2, lags)
ll_here2 = CV.chebval(pix - shift2, cfit2)
[xs2, sxs2, sigmas2] = find_known_lines(linelist2,
ll_here2, spec_here2, options)
""" Fit a chebyshev to the measured positions """
if data2 is not None:
"fit both line lists"
"""combine the line lists"""
clinelist= np.concatenate([linelist,linelist2])
cxs = np.concatenate([xs, xs2])
csxs = np.concatenate([sxs, sxs2])
"""combine the measured xs and sxs arrays that have the measured
line positions"""
[delt, cfit, lines] = fit_chebyshev_to_lines(cxs, csxs,
clinelist, options)
else:
[delt, cfit, lines] = fit_chebyshev_to_lines(xs, sxs,
linelist, options)
#if np.std(delt) < .01: pdb.set_trace()
debug("resid ang S%2.2i @ p%4.0i: %1.2f rms %1.2f mad [shift%2.0f]" % \
(slitno+1, yhere, np.std(delt), np.median(np.abs(delt)),
shift))
return cfit, delt
def sweep(positions):
ret = []
cfits = []
sds = []
mads = []
for position in positions:
cfit, delt = fit_parameters(position)
cfits.append(cfit)
sds.append(np.std(delt))
mads.append(np.median(np.abs(delt)))
cfits, sds, mads = list(map(np.array, [cfits, sds, mads]))
#model = construct_model(cfits, positions, sds)
assert(len(positions) == len(cfits))
        # Note: 'delts' holds the residuals from the last row fitted in the
        # loop above; per-row scatter is recorded in 'lambdaRMS'/'lambdaMAD'.
        return {'coeffs': cfits, 'delts': delt, 'lambdaRMS':
                sds, 'lambdaMAD': mads, "positions": np.array(positions)}
""" Start of main section of fit_outwards """
pix = np.arange(2048.)
positions = np.concatenate((np.arange(start, top, 1),
np.arange(start-1,bottom,-1)))
positions = np.arange(bottom, top, 1)
info("Computing 0 spectrum at %i" % start)
spec0 = np.ma.median(data[start-1:start+1, :], axis=0)
if data2 is not None:
spec2 = np.ma.median(data2[start-1:start+1, :], axis=0)
params = sweep(positions)
return params
class NoSuchFit(Exception):
pass
def fit_to_coefficients(fits, pos, slitno=None):
"""Given a fit structure from fit_outwards_refit and a pixel y position
return the coefficients of a Chebyshev Polynomial"""
if slitno is None:
slitno = 1
found = False
for fit in fits:
if type(fit) is int: continue
fitpos = fit["positions"]
mn = min(fitpos) ; mx = max(fitpos)
if (pos >= mn) and (pos <= mx):
found = True
break
slitno += 1
if not found:
warning("Position %i does not have a fitted wavelength " \
" solution" % pos)
raise NoSuchFit("Position %i does not have a fitted wavelength " \
" solution" % pos)
return np.zeros(5)
fit = fits[slitno-1]
else:
fit = fits[slitno-1]
fitpos = fit["positions"]
mn = min(fitpos) ; mx = max(fitpos)
if not ((pos >= mn) and (pos <= mx)):
warning("Slitno %i has a pixel range of %i-%i but " \
"position %i was requested" % (slitno, mn, mx, pos))
raise Exception("Slitno %i has a pixel range of %i-%i but " \
"position %i was requested" % (slitno, mn, mx, pos))
return np.zeros(5)
funs = fit["functions"]
cs = np.zeros(funs.shape[0])
cs[0] = np.poly1d(funs[0])(pos)
for i in range(1,len(cs)):
cs[i] = np.poly1d(funs[i])(cs[0])
return np.array(cs)
def fit_to_lambda(fits, pix_lambda, pix_spatial, slitno=None):
"""Given a fit structure from fit_outwards_refit and a pixel x,y position
return the wavelength value"""
cs = fit_to_coefficients(fits, pix_spatial, slitno=slitno)
return CV.chebval(pix_lambda, cs)
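# Illustrative sketch (not part of the original code): typical use of
# fit_to_coefficients()/fit_to_lambda() with a previously saved mask model.
# The file name below is a placeholder; the array is assumed to hold the list
# of dictionaries produced by construct_model().
def example_evaluate_mask_model(path="lambda_mask_coeffs_example.npy", row=1024):
    """Return the wavelength [Angstrom] of every column along detector row `row`."""
    fits = np.load(path, allow_pickle=True)
    pix = np.arange(2048.)
    return fit_to_lambda(fits, pix, row)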
# Model Functions
def wavelength_model(p, x):
"""Returns wavelength [um] as function of pixel (x)
The parameter list, p, contains
p[0:3] -- alpha, beta, gamma, delta, model parameters
p[4] -- the grating order.
p[5] -- the pixel y position on detector [pix]
x -- the x pixel position (dispersion direction)
returns wavelength [micron]
"""
order = p[4]
y = p[5]
(alpha, sinbeta, gamma, delta) = p[0:4]
sinbeta = np.radians(sinbeta)
d = 1e3/110.5 # Groove spacing in micron
pixelsize, focal_length = 18.0, 250e3 # micron
scale = pixelsize/focal_length
costerm = np.cos(scale * (y-1024))
return (alpha/(order/d) * 1/costerm * \
(np.sin(scale * (x-1024)) + sinbeta) + \
gamma * (x - delta)**3)*1e4
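# Illustrative sketch (not part of the original code): evaluating the grating
# equation model above for a made-up K-band parameter guess.  The numbers are
# for demonstration only and are not calibration values.
def example_wavelength_model_center():
    """Return the model wavelength [Angstrom] at the center of the detector."""
    guess = [1.0, 42.0, 0.0, 1024.0, 3.0, 1024.0]  # [alpha, sinbeta, gamma, delta, order, ypix]
    return wavelength_model(guess, np.array([1024.0]))[0]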
def mask_model(p, xs):
"""Fit a continuous smooth function to parameters in the mask.
parameters:
linear model is:
x = xs - p[3] 2 3
p[0] + p[1] x + p[2] x + p[3] x + discontinuity
p[4:] -- [N] list of discontinuities
"""
cpix = p[0]
cpar = p[1]
radius_pix = p[2]
radius_par = p[3]
coeffs = p[4:]
vals = []
for i in range(len(coeffs)):
x = np.array(xs[i]) - p[3]
c = coeffs[i]
y = p[0] + p[1] * x + p[2] * x*x + c
vals.extend(y)
return np.array(vals).ravel() * 1e4
def plot_mask_fits(maskname, fname, options):
from matplotlib.backends.backend_pdf import PdfPages
mfits = IO.readmosfits(fname, options)
header, data, bs = mfits
band = header['filter'].rstrip()
fname = fname.rstrip(".fits")
Lmask = IO.load_lambdamodel(fname, maskname, band, options)
Lslit = IO.load_lambdadata(fname, maskname, band, options)
assert(len(Lmask) == len(Lslit))
outname = os.path.join(path, "mask_fit_%s.pdf" % fname)
pp = PdfPages(outname)
for i in range(len(Lmask)):
print(i)
ll = Lmask[i]
ls = Lslit[i]
coeffs = ls['2d']['coeffs']
sds = ls['2d']['lambdaRMS']
posc = ls['2d']['positions']
fits = ll['functions']
pos = ll['positions']
#N = fits.shape[1]
N = 6
ny = 2.0
nx = np.ceil(N/ny)
pl.clf()
c0 = np.poly1d(fits[0])
c0s = c0(pos)
for i in range(1,N):
            ax = pl.subplot(int(nx), int(ny), i)
f = np.poly1d(fits[i])
pl.plot(pos, f(c0s), color='orange')
ylim = pl.ylim()
pl.scatter(posc, coeffs[:, i])
pl.ylim(ylim)
#newfit = np.poly1d(Fit.polyfit_clip(c0(posc), coeffs[:,i], 0))
#pl.plot(pos, newfit(c0(pos)))
pp.savefig()
pp.close()
def plot_sky_spectra(maskname, fname, options, short_exp = False):
from matplotlib.backends.backend_pdf import PdfPages
fp = os.path.join(options['indir'], fname)
    mfits = IO.readmosfits(fp, options)
header, data, bs = mfits
band = header["filter"].rstrip()
fname = fname.rstrip(".fits")
solutions = IO.load_lambdadata(fname, maskname, band, options)
outname = os.path.join(path, "sky_spectra_%s.pdf" % fname)
pp = PdfPages(outname)
band = header['filter'].rstrip()
# determine region to cutoff spectra for xlims
linelist = pick_linelist(header, short_exp = short_exp)
hpps = Filters.hpp[band]
# Pick top 95% of flux for ylims
sdata = np.sort(data, None)
ymax = sdata[-15000]
pix = np.arange(2048)
for solution in solutions:
slitno = solution["slitno"]
parameters = solution["center_sol"][0]
info("Slit: {0}".format(slitno))
parguess = guess_wavelength_solution(slitno, header, bs)
y0 = parguess[-2]
ll = wavelength_model(parameters, pix)
measured = data[y0, :]
pl.clf()
pl.title("Slit {0}".format(solution["slitno"]))
pl.plot(ll, measured, linewidth=.2)
pl.xlim(hpps)
pl.ylim(-30, ymax)
for line in linelist:
pl.axvline(line, color='red', linewidth=.1)
pp.savefig()
pp.close()
def plot_data_quality(maskname, fname, options):
from matplotlib.backends.backend_pdf import PdfPages
path = os.path.join(options["indir"])
if not os.path.exists(path):
error("Output directory '%s' does not exist. This "
"directory should exist." % path)
raise Exception("Output directory '%s' does not exist. This "
"directory should exist." % path)
fp = os.path.join(path, fname)
    mfits = IO.readmosfits(fp, options)
header, data, bs = mfits
fname = fname.rstrip(".fits")
path = './'
solname = os.path.join(path, "lambda_coeffs_%s.npy" % fname)
solutions = np.load(solname)
solname = os.path.join(path, "mask_solution_%s.npy" % fname)
masksol = np.load(solname)[0]
outname = os.path.join(path, "wavelength_fits_%s.pdf" % fname)
pp = PdfPages(outname)
filter_fun = (lambda x:
(x[1] is not None) and
(x[1][0] < 1e-5) and
(x[2] < .2) and
(x[3] == True))
all_pix = []
all_alphas = []
all_betas = []
all_gammas = []
all_deltas = []
for solution in solutions:
sol_2d = solution["2d"]
info("Slit: {0}".format(solution["slitno"]))
ff = list(filter(filter_fun, sol_2d))
ar = np.array([x[0] for x in ff])
if len(ar) == 0: continue
pixels = ar[:,5]
alphas = ar[:,0]
betas = ar[:,1]
gammas = ar[:,2]
deltas = ar[:,3]
sds = ar[:,4]
all_pix.extend(pixels)
all_alphas.extend(alphas)
all_betas.extend(betas)
all_gammas.extend(gammas)
all_deltas.extend(deltas)
alphamodel = np.poly1d(np.polyfit(pixels, alphas, 1))
betamodel = np.poly1d(np.polyfit(pixels, betas, 1))
gammamodel = np.poly1d(np.polyfit(pixels, gammas, 1))
deltamodel = np.poly1d(np.polyfit(pixels, deltas, 1))
info("Scatters: {0:3.5} {1:3.5} {2:3.5} {3:3.5}".format(
np.std(alphas-alphamodel(pixels)),
np.std(betas-betamodel(pixels)),
np.std(gammas-gammamodel(pixels)),
np.std(deltas-deltamodel(pixels)),
))
pl.clf()
pl.subplot(2,2,1)
pl.title("Slit {0}".format(solution["slitno"]))
pl.scatter(pixels, alphas)
pl.plot(pixels, alphamodel(pixels))
pl.ylim([.993,1/.993])
pl.xticks(rotation=90)
pl.ylabel(r'$\alpha$')
pl.subplot(2,2,2)
pl.scatter(pixels, betas)
pl.plot(pixels, betamodel(pixels))
pl.xticks(rotation=90)
pl.ylabel(r'$\beta$')
pl.subplot(2,2,3)
pl.scatter(pixels, gammas)
pl.plot(pixels, gammamodel(pixels))
pl.ylim([0,1e-12])
pl.xticks(rotation=90)
pl.ylabel(r'$\gamma$')
pl.subplot(2,2,4)
pl.scatter(pixels, deltas)
pl.plot(pixels, deltamodel(pixels))
pl.xticks(rotation=90)
pl.ylabel(r'$\delta$')
pp.savefig()
band = header['filter'].rstrip()
[alpha_pixel, sinbeta_position, sinbeta_pixel, gamma_pixel,
delta_pixel] = param_guess_functions(band)
pl.clf()
pl.subplot(1,1,1)
pl.scatter(all_pix, all_alphas, c=all_deltas)
pl.plot(all_pix, alpha_pixel(all_pix), 'r')
ff = np.poly1d(np.polyfit(all_pix, all_alphas, 4))
pl.plot(all_pix, ff(all_pix))
info("Alpha: "+str(ff))
pl.ylabel(r'$\alpha$')
pp.savefig()
pl.clf()
delts = all_alphas - ff(all_pix)
pl.scatter(all_pix, delts, c=all_gammas)
pl.ylabel(r'$\Delta \alpha$')
info("Scatter is {0} pixels".format(np.std(delts)*2048))
pp.savefig()
pl.clf()
pl.scatter(all_pix, all_betas, s=.1)
pl.ylabel(r'$\beta$')
pp.savefig()
pl.clf()
pl.scatter(all_pix, all_gammas, c=all_gammas)
pl.plot(all_pix, gamma_pixel(all_pix), 'r')
ff = np.poly1d(np.polyfit(all_pix, all_gammas, 4))
info("Gamma: "+str(ff))
pl.plot(all_pix, ff(all_pix), 'b')
pl.ylabel(r'$\gamma$')
pp.savefig()
pl.clf()
delta_pixel = np.poly1d([4.284e-5, -0.1145, 1219])
pl.scatter(all_pix, all_deltas, c=all_gammas)
pl.plot(all_pix, delta_pixel(all_pix), 'r')
ff = np.poly1d(np.polyfit(all_pix, all_deltas, 4))
info("Delta: "+str(ff))
pl.ylabel(r'$\delta$')
pp.savefig()
pp.close()
if __name__ == "__main__":
np.set_printoptions(precision=3)
cc = np.load("lambda_coeffs_m120507_0230.npy")
px = []
ys = []
for c in cc:
px.append(c['2d']['positions'])
ys.append(c['2d']['coeffs'][:,0])
| 90,016 | 31.90095 | 216 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Background.py | import os
import sys
import time
import warnings
import numpy as np
from matplotlib import pyplot as pl
try:
from astropy.io import fits as pf
except:
import pyfits as pf
from multiprocessing import Pool
import scipy as sp
import scipy.ndimage
from scipy import interpolate as II
import MOSFIRE
from MOSFIRE import CSU, Fit, IO, Options, Filters, Detector, Wavelength
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
#from IPython.Shell import IPShellEmbed
#ipshell = IPShellEmbed()
class FitError(Exception):
def __init__(self,e):
self.e = e
def rem_header_key(header, key):
try:
del header[key]
except:
return False
return True
def guess_plan_from_positions(posnames):
''' Based on a Set() of position names guess the observing plan.
e.g., position names Set(["A", "B", "A'", "B'"]) -> "A-B", "A'-B'" '''
if posnames == set(["A", "B"]):
return [["A", "B"]]
elif posnames == set(["A'", "B'", "A", "B"]):
return [["A", "B"], ["A'", "B'"]]
else:
raise Exception("Could not get observing plan from positions %s. "
"You must use the plan keyword" % posnames)
def imcombine(files, maskname, options, flat, outname=None, shifts=None,
extension=None):
'''
    From a list of files, imcombine returns a combined frame.
    The imcombine code also estimates the read noise as RN/sqrt(numreads) so
    that the variance per frame is equal to (ADU + RN^2) where RN is computed
    in ADUs.
Arguments:
files[]: list of full path to files to combine
maskname: Name of mask
options: Options dictionary
flat[2048x2048]: Flat field (values should all be ~ 1.0)
outname: If set, will write (see notes below for details)
eps_[outname].fits: electron/sec file
itimes_[outname].fits: integration time
var_[outname].fits: Variance files
shifts[len(files)]: If set, will "roll" each file by the
amount in the shifts vector in pixels. This argument
is used when telescope tracking is poor. If you need
to use this, please notify Keck staff about poor
telescope tracking.
Returns 6-element tuple:
header: The combined header
electrons [2048x2048]: e- (in e- units)
var [2048x2048]: electrons + RN**2 (in e-^2 units)
bs: The MOSFIRE.Barset instance
itimes [2048x2048]: itimes (in s units)
Nframe: The number of frames that contribute to the summed
arrays above. If Nframe > 5 I use the sigma-clipping
Cosmic Ray Rejection tool. If Nframe < 5 then I drop
the max/min elements.
Notes:
header -- fits header
ADUs -- The mean # of ADUs per frame
var -- the Variance [in adu] per frame.
bs -- Barset
itimes -- The _total_ integration time in second
Nframe -- The number of frames in a stack.
Thus the number of electron per second is derived as:
e-/sec = (ADUs * Gain / Flat) * (Nframe/itimes)
The total number of electrons is:
el = ADUs * Gain * Nframe
'''
ADUs = np.zeros((len(files), 2048, 2048))
itimes = np.zeros((len(files), 2048, 2048))
prevssl = None
prevmn = None
patternid = None
maskname = None
header = None
if shifts is None:
shifts = np.zeros(len(files))
warnings.filterwarnings('ignore')
for i in range(len(files)):
fname = files[i]
thishdr, data, bs = IO.readmosfits(fname, options, extension=extension)
itimes[i,:,:] = thishdr["truitime"]
base = os.path.basename(fname).rstrip(".fits")
fnum = int(base.split("_")[1])
if shifts[i] == 0:
ADUs[i,:,:] = data.filled(0.0) / flat
else:
ADUs[i,:,:] = np.roll(data.filled(0.0) / flat, np.int(shifts[i]), axis=0)
''' Construct Header'''
if header is None:
header = thishdr
header["imfno%3.3i" % (fnum)] = (fname, "img%3.3i file name" % fnum)
list(map(lambda x: rem_header_key(header, x), ["CTYPE1", "CTYPE2", "WCSDIM",
"CD1_1", "CD1_2", "CD2_1", "CD2_2", "LTM1_1", "LTM2_2", "WAT0_001",
"WAT1_001", "WAT2_001", "CRVAL1", "CRVAL2", "CRPIX1", "CRPIX2",
"RADECSYS"]))
for card in header.cards:
if card == '': continue
key,val,comment = card
if key in thishdr:
if val != thishdr[key]:
newkey = key + ("_img%2.2i" % fnum)
try: header[newkey.rstrip()] = (thishdr[key], comment)
except: pass
''' Now handle error checking'''
if maskname is not None:
if thishdr["maskname"] != maskname:
error("File %s uses mask '%s' but the stack is of '%s'" %
(fname, thishdr["maskname"], maskname))
raise Exception("File %s uses mask '%s' but the stack is of '%s'" %
(fname, thishdr["maskname"], maskname))
maskname = thishdr["maskname"]
if thishdr["aborted"]:
error("Img '%s' was aborted and should not be used" %
fname)
raise Exception("Img '%s' was aborted and should not be used" %
fname)
if prevssl is not None:
if len(prevssl) != len(bs.ssl):
# todo Improve these checks
error("The stack of input files seems to be of "
"different masks")
raise Exception("The stack of input files seems to be of "
"different masks")
prevssl = bs.ssl
if patternid is not None:
if patternid != thishdr["frameid"]:
error("The stack should be of '%s' frames only, but "
"the current image is a '%s' frame." % (patternid,
thishdr["frameid"]))
raise Exception("The stack should be of '%s' frames only, but "
"the current image is a '%s' frame." % (patternid,
thishdr["frameid"]))
patternid = thishdr["frameid"]
if maskname is not None:
if maskname != thishdr["maskname"]:
error("The stack should be of CSU mask '%s' frames "
"only but contains a frame of '%s'." % (maskname,
thishdr["maskname"]))
raise Exception("The stack should be of CSU mask '%s' frames "
"only but contains a frame of '%s'." % (maskname,
thishdr["maskname"]))
maskname = thishdr["maskname"]
if thishdr["BUNIT"] != "ADU per coadd":
error("The units of '%s' are not in ADU per coadd and "
"this violates an assumption of the DRP. Some new code "
"is needed in the DRP to handle the new units of "
"'%s'." % (fname, thishdr["BUNIT"]))
raise Exception("The units of '%s' are not in ADU per coadd and "
"this violates an assumption of the DRP. Some new code "
"is needed in the DRP to handle the new units of "
"'%s'." % (fname, thishdr["BUNIT"]))
''' Error checking is complete'''
debug("%s %s[%s]/%s: %5.1f s, Shift: %i px" % (fname, maskname, patternid,
header['filter'], np.mean(itimes[i]), shifts[i]))
warnings.filterwarnings('always')
# the electrons and el_per_sec arrays are:
# [2048, 2048, len(files)] and contain values for
# each individual frame that is being combined.
# These need to be kept here for CRR reasons.
electrons = np.array(ADUs) * Detector.gain
el_per_sec = electrons / itimes
output = np.zeros((2048, 2048))
exptime = np.zeros((2048, 2048))
numreads = header["READS0"]
RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
RN = Detector.RN / np.sqrt(numreads)
    # Cosmic ray rejection code begins here. This code constructs the
    # electrons and itimes arrays.
standard = True
new_from_chuck = False
    # Chuck Steidel has provided a modified version of the CRR procedure.
    # To enable it, modify the variables above.
if new_from_chuck and not standard:
if len(files) >= 5:
print("Sigclip CRR")
srt = np.argsort(electrons, axis=0, kind='quicksort')
shp = el_per_sec.shape
sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]
electrons = electrons[srt, sti[1], sti[2]]
el_per_sec = el_per_sec[srt, sti[1], sti[2]]
itimes = itimes[srt, sti[1], sti[2]]
            # Construct the mean and standard deviation after dropping the
            # highest and lowest electron fluxes. This is temporary.
mean = np.mean(el_per_sec[1:-1,:,:], axis = 0)
std = np.std(el_per_sec[1:-1,:,:], axis = 0)
drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
print("dropping: ", len(drop[0]))
electrons[drop] = 0.0
itimes[drop] = 0.0
electrons = np.sum(electrons, axis=0)
itimes = np.sum(itimes, axis=0)
Nframe = len(files)
else:
warning( "With less than 5 frames, the pipeline does NOT perform")
warning( "Cosmic Ray Rejection.")
# the "if false" line disables cosmic ray rejection"
if False:
for i in range(len(files)):
el = electrons[i,:,:]
it = itimes[i,:,:]
el_mf = scipy.signal.medfilt(el, 5)
bad = np.abs(el - el_mf) / np.abs(el) > 10.0
el[bad] = 0.0
it[bad] = 0.0
electrons[i,:,:] = el
itimes[i,:,:] = it
electrons = np.sum(electrons, axis=0)
itimes = np.sum(itimes, axis=0)
Nframe = len(files)
if standard and not new_from_chuck:
if len(files) >= 9:
info("Sigclip CRR")
srt = np.argsort(electrons, axis=0, kind='quicksort')
shp = el_per_sec.shape
sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]
electrons = electrons[srt, sti[1], sti[2]]
el_per_sec = el_per_sec[srt, sti[1], sti[2]]
itimes = itimes[srt, sti[1], sti[2]]
# Construct the mean and standard deviation by dropping the top and bottom two
# electron fluxes. This is temporary.
mean = np.mean(el_per_sec[2:-2,:,:], axis = 0)
std = np.std(el_per_sec[2:-2,:,:], axis = 0)
drop = np.where( (el_per_sec > (mean+std*4)) | (el_per_sec < (mean-std*4)) )
info("dropping: "+str(len(drop[0])))
electrons[drop] = 0.0
itimes[drop] = 0.0
electrons = np.sum(electrons, axis=0)
itimes = np.sum(itimes, axis=0)
Nframe = len(files)
elif len(files) > 5:
warning( "WARNING: Drop min/max CRR")
srt = np.argsort(el_per_sec,axis=0)
shp = el_per_sec.shape
sti = np.ogrid[0:shp[0], 0:shp[1], 0:shp[2]]
electrons = electrons[srt, sti[1], sti[2]]
itimes = itimes[srt, sti[1], sti[2]]
electrons = np.sum(electrons[1:-1,:,:], axis=0)
itimes = np.sum(itimes[1:-1,:,:], axis=0)
Nframe = len(files) - 2
else:
warning( "With less than 5 frames, the pipeline does NOT perform")
warning( "Cosmic Ray Rejection.")
# the "if false" line disables cosmic ray rejection"
if False:
for i in range(len(files)):
el = electrons[i,:,:]
it = itimes[i,:,:]
# calculate the median image
el_mf = scipy.signal.medfilt(el, 5)
el_mf_large = scipy.signal.medfilt(el_mf, 15)
# LR: this is a modified version I was experimenting with. For the version
# written by Nick, see the new_from_chuck part of this code
# sky sub
el_sky_sub = el_mf - el_mf_large
# add a constant value
el_plus_constant = el_sky_sub + 100
bad = np.abs(el - el_mf) / np.abs(el_plus_constant) > 50.0
el[bad] = 0.0
it[bad] = 0.0
electrons[i,:,:] = el
itimes[i,:,:] = it
electrons = np.sum(electrons, axis=0)
itimes = np.sum(itimes, axis=0)
Nframe = len(files)
''' Now handle variance '''
numreads = header["READS0"]
RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
RN = Detector.RN / np.sqrt(numreads)
var = (electrons + RN**2)
''' Now mask out bad pixels '''
electrons[data.mask] = np.nan
var[data.mask] = np.inf
if "RN" in header:
error("RN Already populated in header")
raise Exception("RN Already populated in header")
header['RN'] = ("%1.3f" , "Read noise in e-")
header['NUMFRM'] = (Nframe, 'Typical number of frames in stack')
header['BUNIT'] = 'ELECTRONS/SECOND'
IO.writefits(np.float32(electrons/itimes), maskname, "eps_%s" % (outname),
options, header=header, overwrite=True)
# Update itimes after division in order to not introduce nans
itimes[data.mask] = 0.0
header['BUNIT'] = 'ELECTRONS^2'
IO.writefits(var, maskname, "var_%s" % (outname),
options, header=header, overwrite=True, lossy_compress=True)
header['BUNIT'] = 'SECOND'
IO.writefits(np.float32(itimes), maskname, "itimes_%s" % (outname),
options, header=header, overwrite=True, lossy_compress=True)
return header, electrons, var, bs, itimes, Nframe
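# Illustrative sketch (not part of the original code): the unit bookkeeping
# described in the imcombine() docstring, shown for arrays already summed over
# a stack.  All arguments here are example inputs, not pipeline products.
def example_electrons_per_second(ADUs, flat, itimes, Nframe):
    """Return (e-/s, total e-) following e-/s = (ADUs*gain/flat) * (Nframe/itimes)."""
    el_per_sec = (ADUs * Detector.gain / flat) * (Nframe / itimes)
    total_electrons = ADUs * Detector.gain * Nframe
    return el_per_sec, total_electrons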
def merge_headers(h1, h2):
"""Merge headers h1 and h2 such that h2 has the nod position name
appended"""
h = h1.copy()
patternid = h2["frameid"]
for key, val, comment in h2.cards:
if "NAXIS" in key: continue
if "SIMPLE" in key: continue
if "BITPIX" in key: continue
if "EXTEND" in key: continue
if key in h:
continue
else:
try: h[key] = (val, comment)
except: pass
return h
def handle_background(filelist, wavename, maskname, band_name, options,
shifts=None, plan=None, extension=None, target='default'):
'''
Perform difference imaging and subtract residual background.
The plan looks something like: [['A', 'B']]
In this case, the number of output files is equal to the length of the list (1).
If you choose to use an ABA'B' pattern then the plan will be: [["A", "B"], ["A'", "B'"]]
the background subtraction code will make and handle two files, "A-B" and "A'-B'".
'''
global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band
band = band_name
flatname = "pixelflat_2d_%s.fits" % band_name
hdr, flat = IO.readfits("pixelflat_2d_%s.fits" % (band_name), options)
if np.abs(np.median(flat) - 1) > 0.1:
error("Flat seems poorly behaved.")
raise Exception("Flat seems poorly behaved.")
'''
This next section of the code figures out the observing plan
    and then deals with the bookkeeping of sending the plan
    to the background subtractor.
'''
hdrs = []
epss = {}
vars = {}
bss = []
times = {}
Nframes = []
i = 0
header = pf.Header()
for i in range(len(filelist)):
fl = filelist[i]
files = IO.list_file_to_strings(fl)
info("Combining observation files listed in {}".format(fl))
if shifts is None: shift = None
else: shift = shifts[i]
hdr, electron, var, bs, time, Nframe = imcombine(files, maskname,
options, flat, outname="%s.fits" % (fl),
shifts=shift, extension=extension)
hdrs.append(hdr)
header = merge_headers(header, hdr)
epss[hdr['FRAMEID']] = electron/time
vars[hdr['FRAMEID']] = var
times[hdr['FRAMEID']] = time
bss.append(bs)
Nframes.append(Nframe)
positions = {}
i = 0
for h in hdrs:
positions[h['FRAMEID']] = i
i += 1
posnames = set(positions.keys())
if plan is None:
plan = guess_plan_from_positions(posnames)
num_outputs = len(plan)
edges, meta = IO.load_edges(maskname, band, options)
lam = IO.readfits(wavename, options)
bs = bss[0]
for i in range(num_outputs):
posname0 = plan[i][0]
posname1 = plan[i][1]
info("Handling %s - %s" % (posname0, posname1))
data = epss[posname0] - epss[posname1]
Var = vars[posname0] + vars[posname1]
itime = np.mean([times[posname0], times[posname1]], axis=0)
p = Pool()
solutions = p.map(background_subtract_helper, list(range(len(bs.ssl))))
p.close()
write_outputs(solutions, itime, header, maskname, band, plan[i], options, target=target)
def write_outputs(solutions, itime, header, maskname, band_name, plan, options, target):
sky_sub_out = np.zeros((2048, 2048), dtype=np.float)
sky_model_out = np.zeros((2048, 2048), dtype=np.float)
p0 = plan[0].replace("'", "p")
p1 = plan[1].replace("'", "p")
suffix = "%s-%s" % (p0,p1)
xroi = slice(0,2048)
for sol in solutions:
if not sol["ok"]:
continue
yroi = slice(sol["bottom"], sol["top"])
sky_sub_out[yroi, xroi] = sol["output"]
sky_model_out[yroi, xroi] = sol["model"]
    if target == 'default':
outname = maskname
else:
outname = target
header['BUNIT'] = 'SECOND'
IO.writefits(itime, maskname, "itime_%s_%s_%s.fits" % (outname, band,
suffix), options, header=header, overwrite=True, lossy_compress=True)
header['BUNIT'] = 'ELECTRONS/SECOND'
IO.writefits(data, maskname, "sub_%s_%s_%s.fits" % (outname, band,
suffix), options, header=header, overwrite=True, lossy_compress=True)
header['BUNIT'] = 'ELECTRONS/SECOND'
IO.writefits(sky_sub_out, maskname, "bsub_%s_%s_%s.fits" % (outname, band,
suffix), options, header=header, overwrite=True)
header['BUNIT'] = 'ELECTRONS'
IO.writefits(Var, maskname, "var_%s_%s_%s.fits" % (outname, band,
suffix), options, header=header, overwrite=True, lossy_compress=True)
header['BUNIT'] = 'ELECTRONS/SECOND'
IO.writefits(sky_model_out, maskname, "bmod_%s_%s_%s.fits" % (outname,
band, suffix), options, header=header, overwrite=True,
lossy_compress=True)
'''Now create rectified solutions'''
dlam = Wavelength.grating_results(band)
hpp = np.array(Filters.hpp[band])
ll_fid = np.arange(hpp[0], hpp[1], dlam)
nspec = len(ll_fid)
rectified = np.zeros((2048, nspec), dtype=np.float32)
rectified_var = np.zeros((2048, nspec), dtype=np.float32)
rectified_itime = np.zeros((2048, nspec), dtype=np.float32)
from scipy.interpolate import interp1d
for i in range(2048):
ll = lam[1][i,:]
ss = sky_sub_out[i,:]
ok = np.isfinite(ll) & np.isfinite(ss) & (ll < hpp[1]) & (ll >
hpp[0])
if len(np.where(ok)[0]) < 100:
continue
f = interp1d(ll[ok], ss[ok], bounds_error=False)
rectified[i,:] = f(ll_fid)
f = interp1d(ll, Var[i,:], bounds_error=False)
rectified_var[i,:] = f(ll_fid)
f = interp1d(ll, itime[i,:], bounds_error=False)
rectified_itime[i,:] = f(ll_fid)
header["wat0_001"] = "system=world"
header["wat1_001"] = "wtype=linear"
header["wat2_001"] = "wtype=linear"
header["dispaxis"] = 1
header["dclog1"] = "Transform"
header["dc-flag"] = 0
header["ctype1"] = "AWAV"
header["cunit1"] = "Angstrom"
header["crval1"] = (ll_fid[0], "Starting wavelength Angstrom")
header["crval2"] = 0
header["crpix1"] = 1
header["crpix2"] = 1
header["cdelt1"] = 1
header["cdelt2"] = 1
header["cname1"] = "angstrom"
header["cname2"] = "pixel"
header["cd1_1"] = (dlam, "Angstrom/pixel")
header["cd1_2"] = 0
header["cd2_1"] = 0
header["cd2_2"] = (1, "pixel/pixel")
IO.writefits(rectified_itime, maskname,
"%s_rectified_itime_%s_%s.fits" % (outname, band_name,
suffix), options, header=header, overwrite=True, lossy_compress=True)
IO.writefits(rectified, maskname, "%s_rectified_%s_%s.fits" % (outname,
band_name, suffix), options, header=header, overwrite=True,
lossy_compress=True)
IO.writefits(rectified_var, maskname, "%s_rectified_var_%s_%s.fits" %
(outname, band_name, suffix), options, header=header, overwrite=True,
lossy_compress=True)
IO.writefits(rectified*rectified_itime/np.sqrt(rectified_var), maskname,
"%s_rectified_sn_%s_%s.fits" % (outname, band_name,
suffix), options, header=header, overwrite=True, lossy_compress=True)
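# Illustrative sketch (not part of the original code): the per-row
# rectification used above, for a single spectrum.  `ll` is the wavelength of
# each column [Angstrom], `ss` the sky-subtracted flux, and `ll_fid` the
# fiducial linear wavelength grid; all are example inputs.
def example_rectify_row(ll, ss, ll_fid):
    """Interpolate one detector row onto a fixed wavelength grid."""
    from scipy.interpolate import interp1d
    ok = np.isfinite(ll) & np.isfinite(ss)
    if ok.sum() < 100:
        return np.full(len(ll_fid), np.nan)
    f = interp1d(ll[ok], ss[ok], bounds_error=False)
    return f(ll_fid)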
def background_subtract_helper(slitno):
'''
Background subtraction follows the methods outlined by Kelson (2003). Here
a background is estimated as a function of wavelength using B-splines and
subtracted off. The assumption is that background is primarily a function
of wavelength, and thus by sampling the background across the full 2-d
    spectrum, the background is sampled at a much higher rate than the native
    spectral resolution of MOSFIRE.
    Formally, the assumption that background is only a function of wavelength
    is incorrect, and indeed a "transmission function" is estimated from the 2d
    spectrum. This gives an estimate of the throughput of the slit, which is
    divided out.
1. Extract the slit from the 2d image.
2. Convert the 2d spectrum into a 1d spectrum
3. Estimate transmission function
'''
global header, bs, edges, data, Var, itime, lam, sky_sub_out, sky_model_out, band
tick = time.time()
# 1
top = np.int(edges[slitno]["top"](1024))
bottom = np.int(edges[slitno]["bottom"](1024))
info("Background subtracting slit %i [%i,%i]" % (slitno, top, bottom))
pix = np.arange(2048)
xroi = slice(0,2048)
yroi = slice(bottom, top)
stime = itime[yroi, xroi]
slit = data[yroi, xroi]
Var[np.logical_not(np.isfinite(Var))] = np.inf
lslit = lam[1][yroi,xroi]
# 2
xx = np.arange(slit.shape[1])
yy = np.arange(slit.shape[0])
X,Y = np.meshgrid(xx,yy)
train_roi = slice(5,-5)
ls = lslit[train_roi].flatten().filled(0)
ss = slit[train_roi].flatten()
ys = Y[train_roi].flatten()
dl = np.ma.median(np.diff(lslit[lslit.shape[0]//2,:]))
if dl == 0:
return {"ok": False}
sort = np.argsort(ls)
ls = ls[sort]
ys = ys[sort]
hpps = np.array(Filters.hpp[band])
diff = np.append(np.diff(ls), False)
OK = (diff > 0.001) & (ls > hpps[0]) & (ls < hpps[1]) & (np.isfinite(ls)) \
& (np.isfinite(ss[sort]))
if len(np.where(OK)[0]) < 1000:
warning("Failed on slit "+str(slitno))
return {"ok": False}
# 3
pp = np.poly1d([1.0])
ss = (slit[train_roi] / pp(Y[train_roi])).flatten()
ss = ss[sort]
knotstart = max(hpps[0], min(ls[OK])) + 5
knotend = min(hpps[1], max(ls[OK])) - 5
for i in range(3):
try:
delta = dl*0.9
knots = np.arange(knotstart, knotend, delta)
bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
except ValueError as e:
warning('Failed to fit spline with delta = {:5f}'.format(delta))
warning(str(e))
delta = dl*1.4
info('Trying with delta = {:5f}'.format(delta))
knots = np.arange(knotstart, knotend, delta)
try:
bspline = II.splrep(ls[OK], ss[OK], k=5, task=-1, t=knots)
except ValueError as e:
warning("Could not construct spline on slit "+str(slitno))
warning(str(e))
return {"ok": False}
ll = lslit.flatten()
model = II.splev(ll, bspline)
oob = np.where((ll < knotstart) | (ll > knotend))
model[oob] = np.median(ss[~np.isnan(ss)])
model = model.reshape(slit.shape)
output = slit - model
std = np.abs(output)/(np.sqrt(np.abs(model)+1))
tOK = (std[train_roi] < 10).flatten() & \
np.isfinite(std[train_roi]).flatten()
OK = OK & tOK[sort]
return {"ok": True, "slitno": slitno, "bottom": bottom, "top": top,
"output": output, "model": model, "bspline": bspline}
if __name__ == "__main__":
background_subtract()
| 25,167 | 33.571429 | 96 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Util.py | 4 | 0 | 0 | py |
|
MosfireDRP | MosfireDRP-master/MOSFIRE/conftest.py | # this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
## Uncomment the following line to treat all DeprecationWarnings as
## exceptions
#enable_deprecations_as_exceptions()
## Uncomment and customize the following lines to add/remove entries
## from the list of packages for which version numbers are displayed
## when running the tests
# try:
# PYTEST_HEADER_MODULES['scikit-image'] = 'skimage'
# del PYTEST_HEADER_MODULES['h5py']
# except NameError: # needed to support Astropy < 1.0
# pass
| 699 | 35.842105 | 73 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/_astropy_init.py | # Licensed under a 3-clause BSD style license - see LICENSE.rst
__all__ = ['__version__', '__githash__', 'test']
# this indicates whether or not we are in the package's setup.py
try:
_ASTROPY_SETUP_
except NameError:
from sys import version_info
if version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = False
try:
from .version import version as __version__
except ImportError:
__version__ = ''
try:
from .version import githash as __githash__
except ImportError:
__githash__ = ''
# set up the test command
def _get_test_runner():
import os
from astropy.tests.helper import TestRunner
return TestRunner(os.path.dirname(__file__))
def test(package=None, test_path=None, args=None, plugins=None,
verbose=False, pastebin=None, remote_data=False, pep8=False,
pdb=False, coverage=False, open_files=False, **kwargs):
"""
Run the tests using `py.test <http://pytest.org/latest>`__. A proper set
of arguments is constructed and passed to `pytest.main`_.
.. _py.test: http://pytest.org/latest/
.. _pytest.main: http://pytest.org/latest/builtin.html#pytest.main
Parameters
----------
package : str, optional
The name of a specific package to test, e.g. 'io.fits' or 'utils'.
If nothing is specified all default tests are run.
test_path : str, optional
Specify location to test by path. May be a single file or
directory. Must be specified absolutely or relative to the
calling directory.
args : str, optional
Additional arguments to be passed to pytest.main_ in the ``args``
keyword argument.
plugins : list, optional
Plugins to be passed to pytest.main_ in the ``plugins`` keyword
argument.
verbose : bool, optional
Convenience option to turn on verbose output from py.test_. Passing
True is the same as specifying ``'-v'`` in ``args``.
pastebin : {'failed','all',None}, optional
Convenience option for turning on py.test_ pastebin output. Set to
``'failed'`` to upload info for failed tests, or ``'all'`` to upload
info for all tests.
remote_data : bool, optional
Controls whether to run tests marked with @remote_data. These
tests use online data and are not run by default. Set to True to
run these tests.
pep8 : bool, optional
Turn on PEP8 checking via the `pytest-pep8 plugin
<http://pypi.python.org/pypi/pytest-pep8>`_ and disable normal
tests. Same as specifying ``'--pep8 -k pep8'`` in ``args``.
pdb : bool, optional
Turn on PDB post-mortem analysis for failing tests. Same as
specifying ``'--pdb'`` in ``args``.
coverage : bool, optional
Generate a test coverage report. The result will be placed in
the directory htmlcov.
open_files : bool, optional
Fail when any tests leave files open. Off by default, because
this adds extra run time to the test suite. Requires the
`psutil` package.
parallel : int, optional
When provided, run the tests in parallel on the specified
number of CPUs. If parallel is negative, it will use the all
the cores on the machine. Requires the
`pytest-xdist <https://pypi.python.org/pypi/pytest-xdist>`_ plugin
installed. Only available when using Astropy 0.3 or later.
kwargs
Any additional keywords passed into this function will be passed
on to the astropy test runner. This allows use of test-related
functionality implemented in later versions of astropy without
explicitly updating the package template.
"""
test_runner = _get_test_runner()
return test_runner.run_tests(
package=package, test_path=test_path, args=args,
plugins=plugins, verbose=verbose, pastebin=pastebin,
remote_data=remote_data, pep8=pep8, pdb=pdb,
coverage=coverage, open_files=open_files, **kwargs)
if not _ASTROPY_SETUP_:
import os
from warnings import warn
from astropy import config
# add these here so we only need to cleanup the namespace at the end
config_dir = None
if not os.environ.get('ASTROPY_SKIP_CONFIG_UPDATE', False):
config_dir = os.path.dirname(__file__)
config_template = os.path.join(config_dir, __package__ + ".cfg")
if os.path.isfile(config_template):
try:
config.configuration.update_default_config(
__package__, config_dir, version=__version__)
except TypeError as orig_error:
try:
config.configuration.update_default_config(
__package__, config_dir)
except config.configuration.ConfigurationDefaultMissingError as e:
wmsg = (e.args[0] + " Cannot install default profile. If you are "
"importing from source, this is expected.")
warn(config.configuration.ConfigurationDefaultMissingWarning(wmsg))
del e
except:
raise orig_error
| 5,234 | 36.661871 | 87 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/CSU.py |
'''
MOSFIRE CSU Utility Code
Includes physical parameters of CSU.
Created March 2, 2011 by npk
numslits, numbars give information about the number of slits and bars in the CSU.
Note, for consistency with the hardware bars and slits are indexed from 1.
tempscale is the thermal scaling factor to shrink room temperature linear
dimensions to 120 K. The number 0.99646 is from R. Weber's spreadsheet
"MOSFIRE Thermal Dimension Scaling Factors.xls", referenced from
"Thermophysical properties of matter, Vol 12-13, Thermal Expansion".
demagnification (7.24254) and center_pix (1042.99 pix, 1035.88 pix) were
measured by ccs in January and Feb 2011 using pinhole mask data taken
during the eighth cooldown. These are described in
"README.focal_plane_mapping.txt".
bar pitch is 5.8 mm which is related to pixels using the demagnification
and temperature scale.
'''
import MOSFIRE
from MOSFIRE import Detector, IO
import numpy as np
import unittest
import os
import pdb
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
from astropy.modeling import models
import pickle
class MismatchError(Exception):
'''The code expected a CSU with 46 slits, but found something else.'''
def __init__(self, value):
self.parameter = value
numslits = 46
numbars = numslits * 2
tempscale = 0.99646
def mm_to_pix(mm):
return mm/demagnification/Detector.pixelsize
mm = 1
demagnification = 7.24254
center_pix = (1042.986, 1035.879)
barpitch_mm = (5.8 * mm * tempscale)
barpitch_pix = mm_to_pix(5.8 * mm * tempscale)
def in_field(px, py):
'''Determines if the pixel coordinate (x,y) is within the circular 1150 pix FOV'''
x = px-center_pix[0]
y = py-center_pix[1]
dist = np.sqrt(x*x + y*y)
if dist < 1150.: return True
return False
def csu_pix_to_mm_poly(x_pix, y_pix):
(x_kfp, y_kfp) = python_geoxytran(x_pix, y_pix, direction="backward")
centerx = 137.400
x_mm = centerx - x_kfp
y_mm = y_kfp
return (x_mm, y_mm)
def csu_mm_to_pix_poly(x_mm, slitno):
'''Uses ccs fits in ../platescale directory'''
# _kfp is keck focal plane
centerx = 137.400
x_kfp = (centerx - x_mm)
y_kfp = 5.8 * (numslits/2. - slitno + 0.35) * tempscale
return python_geoxytran(x_kfp, y_kfp)
def python_geoxytran(x_kfp, y_kfp, direction="forward"):
if direction=="backward":
px_backward = models.Legendre2D(6, 6, c0_0=-1.359159118077272, c1_0=0.12849509682026816, c2_0=0.00017587310282272408, c3_0=-8.214009406649863e-06,
c4_0=2.0206624190921399e-07, c5_0=-2.225331028379213e-09, c6_0=1.8201097072390407e-14, c0_1=-0.0008865780983085235,
c1_1=0.0002837258293901996, c2_1=-1.3953479926814954e-05, c3_1=2.6865725414225316e-07, c4_1=3.7333388292351965e-10,
c5_1=8.662694037494459e-13, c6_1=-7.6802320344598e-15, c0_2=9.616027720780746e-05, c1_2=-1.5463196269995818e-05,
c2_2=4.615248418093103e-07, c3_2=-9.938940430240368e-09, c4_2=7.538338883385691e-12, c5_2=-1.8016883087953452e-13,
c6_2=1.6284543459178821e-15, c0_3=-2.7112157097925163e-06, c1_3=3.63691974893539e-07, c2_3=6.326334170834919e-10,
c3_3=1.2045620539279474e-11, c4_3=-6.281301326529564e-13, c5_3=1.5395969945758583e-14, c6_3=-1.4203191615580046e-16,
c0_4=2.487234831550635e-08, c1_4=-5.3202681529753e-09, c2_4=3.813876920246599e-12, c3_4=-4.578771786695712e-13,
c4_4=2.4833675429790513e-14, c5_4=-6.278532214053127e-16, c6_4=5.932362209122972e-18, c0_5=2.6533817724685113e-10,
c1_5=6.362774492493808e-14, c2_5=-5.695287662674329e-14, c3_5=7.648943667217284e-15, c4_5=-4.4244874441233506e-16,
c5_5=1.1718033882619874e-17, c6_5=-1.1450561454795142e-19, c0_6=-5.252495563272626e-15, c1_6=6.498737275590606e-16,
c2_6=2.2221508682832634e-16, c3_6=-4.096197448486931e-17, c4_6=2.7086424901520096e-18, c5_6=-7.787892566015997e-20,
c6_6=8.028451974197805e-22)
py_backward = models.Legendre2D(6, 6, c0_0=-1.3408760539296245, c1_0=-0.0014681933080717899, c2_0=6.252434078059442e-05, c3_0=-1.7794023960848901e-06,
c4_0=2.0505693079301286e-08, c5_0=1.3303121908968087e-10, c6_0=4.036925907590215e-14, c0_1=0.1287659978047137,
c1_1=0.0002187658143857909, c2_1=-1.1414122040749694e-05, c3_1=2.514881941931133e-07, c4_1=-4.014646650126551e-09,
c5_1=4.6361664655461665e-12, c6_1=-4.2907954493018394e-14, c0_2=0.00017210509816287917, c1_2=-1.1517572721650909e-05,
c2_2=4.070900780580943e-07, c3_2=-2.924032092730881e-10, c4_2=4.3651272195759074e-11, c5_2=-1.0942185864222553e-12,
c6_2=1.0124374619198603e-14, c0_3=-8.927312822514692e-06, c1_3=2.178919731355544e-07, c2_3=-9.309247977587529e-09,
c3_3=7.587241655284752e-11, c4_3=-4.20296301764774e-12, c5_3=1.0534116340750084e-13, c6_3=-9.745842383678927e-16,
c0_4=2.3500427475161216e-07, c1_4=5.797618861500032e-10, c2_4=2.787756737792332e-11, c3_4=-3.471711550473785e-12,
c4_4=1.9236825440442053e-13, c5_4=-4.82226129561789e-15, c6_4=4.4622762166743036e-17, c0_5=-2.631606790683655e-09,
c1_5=1.8088697785601813e-12, c2_5=-6.029913349859671e-13, c3_5=7.51954755289406e-14, c4_5=-4.1690939247203245e-15,
c5_5=1.0455627246423308e-16, c6_5=-9.679289662228309e-19, c0_6=1.1235020215574227e-14, c1_6=-1.481115941278654e-14,
c2_6=4.964545148330356e-15, c3_6=-6.201605688722248e-16, c4_6=3.441248923238193e-17, c5_6=-8.635683356739731e-19, c6_6=7.999236760366155e-21)
x = px_backward(x_kfp/100.0,y_kfp/100.0)*100.0
y = py_backward(x_kfp/100.0,y_kfp/100.0)*100.0
if direction=="forward":
px_forward = models.Legendre2D(6, 6, c0_0=10.429995608040636, c1_0=7.669072866696911, c2_0=-0.0005171642200091058, c3_0=0.0010640118370285666,
c4_0=6.591355825164487e-05, c5_0=0.0004579785406086718, c6_0=-3.381749890396372e-07, c0_1=-0.029899970699651657,
c1_1=-1.0370107736602057e-05, c2_1=0.00012355149488906862, c3_1=0.0001681000870132385, c4_1=-8.915078035548195e-05,
c5_1=1.0828480948981245e-06, c6_1=-3.0604028638458465e-07, c0_2=-0.0005617773576709009, c1_2=0.0021497066157591966,
c2_2=0.0003159972245946561, c3_2=0.001999515078707485, c4_2=9.953809608627005e-07, c5_2=8.667245967324062e-06,
c6_2=2.8043195865254592e-08, c0_3=0.0001440482443085916, c1_3=-3.4260998883389794e-05, c2_3=-0.00016054621466681272,
c3_3=9.517312759587115e-07, c4_3=-4.1446577769773705e-07, c5_3=3.6262929000660604e-07, c6_3=-1.4504338440561597e-07,
c0_4=0.00015388407922332725, c1_4=0.0010841802946648064, c2_4=5.000639720902258e-07, c3_4=5.927690431156899e-06,
c4_4=5.991902315389979e-07, c5_4=1.7610373474546858e-06, c6_4=3.6698527469006115e-09, c0_5=-5.883225927240384e-05,
c1_5=3.472735736376934e-07, c2_5=-1.0563775236811306e-06, c3_5=2.2897505242989876e-07, c4_5=-2.389407023502443e-07,
c5_5=-2.7244280935829703e-09, c6_5=-2.0771844124138064e-08, c0_6=2.8762976671867224e-07, c1_6=3.2864844277261225e-06,
c2_6=3.881850299314179e-07, c3_6=1.3047967293000456e-06, c4_6=1.0513711949538813e-08, c5_6=8.671289818897808e-09, c6_6=-1.760160785036003e-09)
py_forward = models.Legendre2D(6, 6, c0_0=10.35865352089189, c1_0=0.029868752847050786, c2_0=-5.181441268292675e-05, c3_0=9.57356706812987e-05,
c4_0=0.00017241278829382978, c5_0=-2.4675256900133155e-05, c6_0=3.95235509529886e-07, c0_1=7.66897227133641,
c1_1=0.00034364965616906756, c2_1=0.0027356854767171535, c3_1=0.00042743941177974913, c4_1=0.0007837226536120123,
c5_1=9.874568475762985e-07, c6_1=2.3707441689970604e-06, c0_2=0.0003489725952788292, c1_2=0.0002812999003417556,
c2_2=0.0006371193460650293, c3_2=-8.6155293329177e-05, c4_2=1.6936419268906433e-06, c5_2=-3.490309288135124e-07,
c6_2=4.3797605194396266e-07, c0_3=0.0007884544919103953, c1_3=0.00021149527310720538, c2_3=0.0017752319143687079,
c3_3=1.4502993553656453e-06, c4_3=5.374215134624355e-06, c5_3=4.6355913497424066e-07, c6_3=9.418886502384274e-07,
c0_4=9.01336941821207e-05, c1_4=-0.0001230909091922516, c2_4=2.2577487548726405e-06, c3_4=8.480105722621575e-08,
c4_4=6.821359101651797e-07, c5_4=-2.7924348331191066e-07, c6_4=1.195944732829135e-08, c0_5=0.0005414644597315499,
c1_5=1.1375883331712563e-06, c2_5=8.954534272097303e-06, c3_5=5.176871464493416e-07, c4_5=1.5475712624698004e-06,
c5_5=1.0885690389676392e-08, c6_5=1.1381062005799344e-08, c0_6=1.4659708681089706e-07, c1_6=-6.338488221563267e-07,
c2_6=2.1673413055835894e-07, c3_6=-2.8336747286630153e-07, c4_6=1.6623075723212755e-08, c5_6=1.7700444635232546e-08, c6_6=-1.7039889450896992e-09)
x = px_forward(x_kfp/100.0,y_kfp/100.0)*100.0
y = py_forward(x_kfp/100.0,y_kfp/100.0)*100.0
return (x,y)
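# Illustrative sketch (not part of the original code): the forward and
# backward Legendre transforms above should roughly invert one another.  The
# test point (Keck focal plane coordinates in mm) and tolerance are arbitrary
# example values.
def example_geoxytran_roundtrip(x_kfp=10.0, y_kfp=-20.0, tol=0.5):
    """Return True if kfp -> pixel -> kfp reproduces the input to within `tol` mm."""
    px, py = python_geoxytran(x_kfp, y_kfp, direction="forward")
    xb, yb = python_geoxytran(px, py, direction="backward")
    return abs(xb - x_kfp) < tol and abs(yb - y_kfp) < tol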
def bar_to_slit(x):
'''Convert a bar #(1-92) to a slit(1-46) number'''
if (x < 1) or (x > numbars):
error("Not indexing CSU properly")
raise MismatchError("Not indexing CSU properly")
return int(x+1)//2
def to_ds9_region(poss, dash=1, color="green", label=True):
s = []
d = np.radians(4.2)
dx = barpitch_pix/2. * np.sin(d)
dy = barpitch_pix/2. * np.cos(d)
for i in range(1,numbars+1):
pos = poss[i-1]
if not np.isfinite(pos[0]): continue
if not np.isfinite(pos[1]): continue
ln = [pos[0]+dx,pos[1]-dy,pos[0]-dx,pos[1]+dy]
if label:
s.append("line(%6.3f, %6.3f, %6.3f, %6.3f) # line=0 0 color=%s text={b%2.0i} dash=%1i fixed=1 edit=0 move=0 rotate=0 \n" % (ln[0], ln[1], ln[2], ln[3], color, i, dash))
else:
s.append("line(%6.3f, %6.3f, %6.3f, %6.3f) # line=0 0 color=%s dash=%1i fixed=1 edit=0 move=0 rotate=0 \n" % (ln[0], ln[1], ln[2], ln[3], color, dash))
return s
class Barset(object):
'''Barset provides convenience functions around a CSU slitmask'''
pos = []
pos_pix = []
header = None
# Science slit list, mechanical slit list, & alignment slit list.
ssl = None
msl = None
asl = None
targs = None
long_slit = False
long2pos_slit = False
scislit_to_slit = []
alignment_slits = []
def __init__(self):
pass
def set_header(self, header, ssl=None, msl=None, asl=None, targs=None):
'''Passed "header" a FITS header dictionary and converts to a Barset'''
self.pos = np.array(IO.parse_header_for_bars(header))
self.set_pos_pix()
self.ssl = ssl
self.msl = msl
self.asl = asl
self.targs = targs
def is_alignment_slit(slit):
return (np.float(slit["Target_Priority"]) < 0)
# If len(ssl) == 0 then the header is for a long slit
if (header['MASKNAME'] == 'long2pos'):
info("long2pos mode in CSU slit determination")
self.long2pos_slit = True
if (len(ssl) == 0):
self.long_slit = True
start = np.int(msl[0]["Slit_Number"])
stop = np.int(msl[-1]["Slit_Number"])
for mech_slit in msl:
mech_slit["Target_in_Slit"] = "long"
self.ssl = np.array([("1", "??", "??", "??", "??", "??", "??", msl[0]['Slit_width'],
(stop-start+1)*7.6, "0", "long", "0")],
dtype= [ ('Slit_Number', '|S2'),
('Slit_RA_Hours', '|S2'), ('Slit_RA_Minutes', '|S2'), ('Slit_RA_Seconds', '|S5'),
('Slit_Dec_Degrees', '|S3'), ('Slit_Dec_Minutes', '|S2'), ('Slit_Dec_Seconds', '|S5'),
('Slit_width', '|S5'), ('Slit_length', '|S5'), ('Target_to_center_of_slit_distance', '|S5'),
('Target_Name', '|S80'), ('Target_Priority', '|S1')])
self.scislit_to_slit = [ np.arange(start,stop) ]
ssl = None
# Create a map between scislit number and mechanical slit
# recall that slits count from 1
if ssl is not None:
prev = self.msl[0]["Target_in_Slit"]
v = []
for science_slit in ssl:
targ = science_slit["Target_Name"]
v.append([int(x) for x in self.msl.field("Slit_Number")[np.where(self.msl.field("Target_in_Slit").rstrip() == targ)[0]]])
self.scislit_to_slit = v
if (len(self.scislit_to_slit) != len(ssl)) and not (self.long_slit
and len(self.scislit_to_slit) == 1):
error("SSL should match targets in slit")
raise Exception("SSL should match targets in slit")
def is_alignment_slitno(self, slitno):
return (slitno in self.alignment_slits)
def csu_slit_center(self, slitno):
'''Returns the mechanical (middle) position of a csu slit in mm'''
if (slitno < 1) or (slitno > 46):
error("The requested slit number (%i) does not exist" %
slitno)
raise Exception("The requested slit number (%i) does not exist" %
slitno)
os = self.pos[slitno*2 - 2]
es = self.pos[slitno*2 - 1]
return (os+es)/2.
def scislit_to_csuslit(self, scislit):
'''Convert a science slit number to a mechanical slit list'''
if (scislit < 1) or (scislit > len(self.ssl)+1):
error("The requested slit number (%i) does not exist" %
scislit)
raise Exception("The requested slit number (%i) does not exist" %
scislit)
return self.scislit_to_slit[scislit-1]
def csu_slit_to_pixel(self, slit):
'''Convert a CSU slit number to spatial pixel'''
y0 = 2013
if (slit < 1) or (slit > 46):
error("The requested slit number (%i) does not exist" %
slit)
raise Exception("The requested slit number (%i) does not exist" %
slit)
pixel = np.int(y0 - (slit -1) * 44.22)
return pixel
def science_slit_to_pixel(self, scislit):
'''Convert a science slit number to spatial pixel'''
if (scislit < 1) or (scislit > len(self.ssl)):
error("The requested science slit number %i does not exist" \
% scislit)
raise Exception("The requested science slit number %i does not exist" \
% scislit)
slits = self.scislit_to_csuslit(scislit)
debug(str(slits))
return self.csu_slit_to_pixel(np.median(slits))
def set_pos_pix(self):
# _kfp is keck focal plane
centerx = 137.400
x_kfp = (centerx - self.pos)
slitno = np.ceil(np.arange(1, numbars+1)/2.)
y_kfp = 5.8 * (numslits/2. - slitno + 0.35) * tempscale
tmp_results = python_geoxytran(x_kfp, y_kfp)
self.pos_pix = np.asarray([list(p) for p in zip(tmp_results[0],tmp_results[1])])
def to_ds9_region(self):
poss = []
for i in range(1,numbars+1):
poss.append(self.get_bar_pix(i))
return to_ds9_region(poss)
def get_bar_pix(self, bar):
'''Return the pixel position of bar(1-92)'''
return self.pos_pix[bar-1]
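# Illustrative sketch (not part of the original code): once a Barset has been
# populated via set_header(), the slit bookkeeping above can be used like
# this.  `bs` is assumed to be such a Barset instance.
def example_slit_positions(bs):
    """Return (slit number, mechanical center [mm], spatial pixel) for every CSU slit."""
    return [(s, bs.csu_slit_center(s), bs.csu_slit_to_pixel(s))
            for s in range(1, numslits + 1)]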
class TestCSUFunctions(unittest.TestCase):
def setUp(self):
pass
def test_bar_to_slit(self):
sa = self.assertTrue
sa(bar_to_slit(1) == 1)
sa(bar_to_slit(2) == 1)
sa(bar_to_slit(91)==46)
sa(bar_to_slit(92)==46)
sa(bar_to_slit(92.)==46)
sa(bar_to_slit(1.)==1)
sa(bar_to_slit(1.5)==1)
sa(bar_to_slit(2.)==1)
self.assertRaises(MismatchError, bar_to_slit, (-1, 0, 93, 94))
def test_bar_mm(self):
sa = self.assertTrue
# Values are taken from ccs
p0 = python_geoxytran(0,0)
sa(np.abs(p0[0] - center_pix[0]) < 1e-6)
sa(np.abs(p0[1] - center_pix[1]) < 1e-6)
def test_Barset(self):
b = Barset()
pos = np.arange(92)
b.set_mms(pos)
self.assertTrue((b.pos == pos).all())
p1 = b.get_bar_pix(1)
#self.assertTrue((p1==csu_mm_to_pix(pos[0], 1)).all())
# FIXME More tests here
if __name__ == '__main__':
unittest.main()
| 17,374 | 45.457219 | 183 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/MosfireDrpLog.py | import logging, sys
LOG_FILENAME = "mosfire_DRP.log"
formatter = logging.Formatter('%(asctime)s - %(module)12s.%(funcName)20s - %(levelname)s: %(message)s')
# set up logging to STDOUT for all levels DEBUG and higher
sh = logging.StreamHandler(sys.stdout)
sh.setLevel(logging.INFO)
sh.setFormatter(formatter)
# set up logging to a file for all levels DEBUG and higher
fh = logging.FileHandler(LOG_FILENAME)
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
# create Logger object
mylogger = logging.getLogger('MyLogger')
mylogger.setLevel(logging.DEBUG)
mylogger.addHandler(sh) # enabled: stdout
mylogger.addHandler(fh) # enabled: file
# create shortcut functions
debug = mylogger.debug
info = mylogger.info
warning = mylogger.warning
error = mylogger.error
critical = mylogger.critical
#logger.info ("log started")
# Add log entries for versions of numpy, matplotlib, astropy, ccdproc
info(sys.version)
info('python version = {}.{}.{}'.format(sys.version_info.major,
sys.version_info.minor,
sys.version_info.micro))
import numpy as np
info('numpy version = {}'.format(np.__version__))
import matplotlib
info('matplotlib version = {}'.format(matplotlib.__version__))
import astropy
info('astropy version = {}'.format(astropy.__version__))
import ccdproc
info('ccdproc version = {}'.format(ccdproc.__version__))
| 1,410 | 29.021277 | 103 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Fit.py | '''
Fitting code used by a variety of MOSFIRE applications.
Written in March 2011 by npk
'''
from scipy.special import erf
import scipy.optimize as optimize
import numpy as np
from matplotlib import pyplot as pl
import MOSFIRE.nmpfit_mos as mpfit
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
# Following is to correct for old/new version of stsci python
## try:
## import pytools
## except ImportError: import stsci.tools.nmpfit as mpfit
#from MosfireDrpLog import debug, info, warning, error
import unittest
def xcor(a,b,lags):
if len(a) != len(b):
error("cross correlation (xcor) requires a and b "
"to be of same length")
raise Exception(
"cross correlation (xcor) requires a and b "
"to be of same length")
cors = np.zeros(len(lags))
a_pad = np.zeros(len(a)+len(lags))
b_pad = np.zeros(len(b)+len(lags))
st = np.argmin(np.abs(lags))
a_pad[st:st+len(a)] = a
b_pad[st:st+len(b)] = b
for i in range(len(lags)):
cors[i] = np.correlate(a_pad, np.roll(b_pad, lags[i]), 'valid')
return cors
def xcor_peak(a, b, lags):
'''Return the peak position in units of lags'''
N = len(lags)
xcs = xcor(a, b, lags)
return lags[np.argmax(xcs)]
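# Illustrative sketch (not part of the original module): recovering a known
# pixel shift between two 1-d profiles with xcor_peak. The Gaussian profile
# and the 4-pixel shift are made-up demonstration values; the recovered lag
# has the magnitude of the applied shift (its sign follows the roll
# convention used in xcor above).
def _example_xcor_peak():
    xs = np.arange(200)
    profile = np.exp(-0.5 * ((xs - 100.0) / 3.0) ** 2)
    shifted = np.roll(profile, 4)
    lags = np.arange(-10, 11)
    return xcor_peak(profile, shifted, lags)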
# TODO: Document mpfit_* functions
def mpfit_residuals(modelfun):
def fun(param, fjac=None, x=None, y=None, error=None):
'''Generic function'''
model = modelfun(param, x)
status = 0
if error is None:
return [status, y-model]
return [status, (y-model)/error]
return fun
def mpfit_do(residual_fun, # function returned from mpfit_residuals() above
x, # input x
y, # input y = f(x)
parinfo, # initial parameter guess
error=None,
maxiter=20):
#TODO : Document parinfo part
fa = {"x": x, "y": y}
if error is not None:
fa["error"] = error
lsf = mpfit.mpfit(residual_fun, parinfo=parinfo, functkw=fa,
quiet=1, maxiter=maxiter)
return lsf
# MPFITPEAK
def gaussian(p, x):
''' gaussian model
p[0] -- scale factor
p[1] -- centroid
p[2] -- sigma
p[3] -- offset
p[4] -- slope
'''
u = (x - p[1])/p[2]
return p[0]*np.exp(-0.5*u*u) + p[3] + p[4]*x
def gaussian_residuals(p, fjac=None, x=None, y=None, error=None):
model = gaussian(p, x)
status = 0
delt = y-model
if error is None:
return [status, delt]
return [status, delt/error]
def multi_gaussian(p, x):
N = p[0]
sigma = p[1]
offset = p[2]
slope = p[3]
y = np.zeros(len(x))
j = 4
for i in range(np.int(N)):
y += gaussian([p[j], p[j+1], sigma, 0, 0], x)
j+=2
y += offset + slope*x
return y
def multi_gaussian_residuals(p, fjac=None, x=None, y=None, error=None):
model = multi_gaussian(p, x)
status = 0
delt = y - model
if error is None:
return [status, delt]
return [status, delt]
def mpfitpeak(x, y, error=None):
parinfo = [{"value": np.max(y), "fixed": 0, "name": "Peak Value",
'step': 10},
{"value": x[np.argmax(y)], "fixed": 0, "name": "Centroid",
'step': .1},
{"value": 1.1, "fixed": 0, "name": "Sigma",
'step': .1},
{"value": np.min(y), "fixed": 0, "name": "Offset",
'step': 10},
{"value": 0, "fixed": 0, "name": "Slope",
'step': 1e-5}]
fa = {"x": x, "y": y}
if error is not None: fa["error"] = error
return mpfit.mpfit(gaussian_residuals, parinfo=parinfo, functkw=fa, quiet=1)
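# Illustrative sketch (not part of the original module): fitting a single
# noisy emission-line-like peak with mpfitpeak. The centroid, width and noise
# level are arbitrary demonstration values, and the fitted parameter vector
# (scale, centroid, sigma, offset, slope -- see gaussian() above) is assumed
# to be exposed on the returned mpfit object as .params.
def _example_mpfitpeak():
    xs = np.arange(100, dtype=np.float64)
    truth = [50.0, 42.0, 2.5, 10.0, 0.0]
    ys = gaussian(truth, xs) + np.random.normal(0.0, 0.5, len(xs))
    result = mpfitpeak(xs, ys, error=np.ones(len(xs)) * 0.5)
    return result.params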
def mpfitpeaks(x, y, N, error=None):
pars = [1, np.min(y), 0]
parinfo = [ {"value": N, "fixed": 1, "name": "Number of Peaks",
"limited": [0, 0], "limits": [0, 0]},
{"value": 1.6, "fixed": 0, "name": "Sigma",
"limited": [0, 0], "limits": [0, 0]},
{"value": pars[1], "fixed": 0, "name": "Offset",
"limited": [0, 0], "limits": [0, 0]},
{"value": pars[2], "fixed": 0, "name": "Slope",
"limited": [0, 0], "limits": [0, 0]}]
for i in range(N):
v = {"value": np.max(y)/2., "fixed": 0, "name": "Peak Value(%i)" % i,
"limited": [1, 0], "limits": [0, 0]}
pars.append(np.max(y))
parinfo.append(v)
v = {"value": x[np.argmax(y)], "fixed": 0, "name": "Centroid(%i)" % i}
pars.append(x[np.argmax(y)])
parinfo.append(v)
fa = {"x": x, "y": y}
if error is not None: fa["error"] = error
return mpfit.mpfit(multi_gaussian_residuals, parinfo=parinfo, functkw=fa,
quiet=1)
def slit_edge_fun(x, s):
''' The edge of a slit, convolved with a Gaussian, is well fit by
    the error function. slit_edge_fun is a re-expression of the error
    function in the classical Gaussian "sigma" units'''
sq2 = np.sqrt(2)
sig = sq2 * s
return np.sqrt(np.pi/2.) * s * erf(x/sig)
def fit_bar_edge(p, x):
'''
Fitting function for a bar edge
'''
return p[0] + np.radians(4.2) * x
def fit_single(p, x):
'''
The fitting function used by do_fit_single. This is a single slit edge
p[0] ---> Sigma
p[1] ---> Horizontal offset
    p[2] ---> Multiplicative offset
p[3] ---> Additive offset
'''
return slit_edge_fun(x - p[1], p[0]) * p[2] + p[3]
def fit_pair(p, x):
'''
    The fitting function used by "do_fit". The sum of two edge functions.
    p[0] ---> Sigma
    p[1] ---> Horizontal offset
    p[2] ---> Multiplicative offset
p[3] ---> Additive offset
p[4] ---> Width of slit
'''
return slit_edge_fun(x - p[1], p[0]) * p[2] + p[3] - slit_edge_fun(x
- p[1] - p[4], p[0]) * p[2]
def fit_disjoint_pair(p,x):
'''
    The fitting function used by "do_fit". The sum of two edge functions.
    p[0] ---> Sigma
    p[1] ---> Horizontal offset
    p[2] ---> Multiplicative offset side 1
p[3] ---> Multiplicative offset side 2
p[4] ---> Additive offset
p[5] ---> Width of slit
'''
return slit_edge_fun(x - p[1], p[0]) * p[2] + p[4] - slit_edge_fun(x
- p[1] - p[5], p[0]) * p[3]
def residual(p, x, y, f):
'''The square of residual is minimized by the least squares fit.
Formally this is (f(x | p) - y)**2'''
return f(p, x) - y
def residual_wavelength(p, x, y):
return residual(p, x, y, fit_wavelength_model)
def residual_single(p, x, y):
    '''Convenience function around residual'''
return residual(p, x, y, fit_single)
def residual_pair(p, x, y):
    '''Convenience function around residual'''
return residual(p, x, y, fit_pair)
def residual_disjoint_pair(p, x, y):
    '''Convenience function around residual'''
return residual(p, x, y, fit_disjoint_pair)
def residual_bar_edge(p, x, y):
return residual(p, x, y, fit_bar_edge)
def do_fit(data, residual_fun=residual_single):
'''do_fit estimates parameters of fit_pair or fit_single.
Use as follows:
p0 = [0.5, 6, 1.1, 3, 1]
ys = fit_single(p0, xs)
lsf = do_fit(ys, residual_single)
res = np.sum((lsf[0] - p0)**2)
'''
xs = np.arange(len(data))
if residual_fun==residual_single:
if data[0] > data[-1]:
p0 = [0.5, len(data)/2., max(data), 0.0, 3.0]
else:
p0 = [0.5, len(data)/2., -max(data), 0.0, 3.0]
elif residual_fun==residual_pair:
p0 = [0.5, np.argmax(data), max(data), 0.0, 4.0]
elif residual_fun==residual_disjoint_pair:
width = 5
p0 = [0.5,
np.argmin(data),
-np.ma.median(data[0:3]),
-np.ma.median(data[-4:-1]),
np.ma.median(data),
width]
else:
error("residual_fun not specified")
raise Exception("residual_fun not specified")
lsf = optimize.leastsq(residual_fun, p0, args=(xs, data),
full_output=True)
return lsf
def do_fit_edge(xs, ys):
p0 = [ys.mean()]
return optimize.leastsq(residual_bar_edge, p0, args=(xs, ys))
def polyfit_clip(xs, ys, order, nsig=2.5):
ff = np.poly1d(np.polyfit(xs, ys, order))
sd = np.std(ys - ff(xs))
if sd == 0.0:
warning('Clipping failed because stddev=0, using unclipped fit.')
result = ff
else:
r = np.abs(ys - ff(xs))
ok = r < (sd * nsig)
try:
result = np.polyfit(xs[ok], ys[ok], order)
except:
warning('Clipping failed, using unclipped fit.')
result = ff
return result
def polyfit_sigclip(xs, ys, order, nmad=4):
ok = np.ones(len(xs))>0.5
for i in range(5):
ff = np.poly1d(np.polyfit(xs[ok], ys[ok], order))
sd = np.median(np.abs(ys[ok] - ff(xs[ok])))
r = np.abs(ys - ff(xs))
ok = r < (sd * nmad)
return np.polyfit(xs[ok], ys[ok], order)
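# Illustrative sketch (not part of the original module): polyfit_sigclip
# iteratively refits the polynomial while rejecting points whose residual
# exceeds nmad times the median absolute residual, so a few bad pixels do not
# pull the fit. The line and outlier values below are made up.
def _example_polyfit_sigclip():
    xs = np.arange(100, dtype=np.float64)
    ys = 3.0 + 0.5 * xs
    ys[10] += 200.0   # two artificial outliers
    ys[60] -= 150.0
    coeffs = polyfit_sigclip(xs, ys, 1)
    return np.poly1d(coeffs)(50.0)   # expected near 3 + 0.5 * 50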
class TestFitFunctions(unittest.TestCase):
def setUp(self):
pass
def test_do_fit(self):
import random
sa = self.assertTrue
xs = np.arange(19)
p0 = [0.5, 6, 1.1, 3, 1]
ys = fit_single(p0, xs)
lsf = do_fit(ys, residual_single)
res = np.sum((fit_single(lsf[0],xs) - ys)**2)
sa(res < 0.001)
def test_do_fit2(self):
sa = self.assertTrue
p0 = [0.5, 6, 1.1, 3, 3]
xs = np.arange(15)
ys = fit_pair(p0, xs)
lsf = do_fit(ys, residual_pair)
res = np.sum((lsf[0] - p0)**2)
sa(res < 0.001)
def test_lhs_v_rhs(self):
sa = self.assertTrue
p0 = [0.5, 5, 1.1,.7,0]
pn0 = [0.5, 5, -1.1,.7,0]
xs = np.arange(25)
ys = fit_single(p0, xs)
lsf = do_fit(ys, residual_single)
info(str(lsf[0]))
ys = fit_single(pn0, xs)
lsf = do_fit(ys, residual_single)
info(str(lsf[0]))
if __name__ == '__main__':
unittest.main()
def do_fit_wavelengths(pixels, lambdas, alphaguess,
sinbetaguess, gammaguess, deltaguess, band, pixel_y, error=None):
    ''' THIS SHOULD BE REMOVED'''
bmap = {"Y": 6, "J": 5, "H": 4, "K": 3}
order = bmap[band]
parinfo = [
{'fixed': 1, 'value': order, 'parname': 'order',
'limited': [0,0], 'limits': [0,0]},
{'fixed': 1, 'value': pixel_y, 'parname': 'Y',
'limited': [0,0], 'limits': [0,0]},
{'fixed': 0, 'value': alphaguess, 'parname': 'alpha', 'step': 1e-5,
'limited': [0,0], 'limits': [0,0]},
{'fixed': 0, 'value': sinbetaguess, 'parname': 'sinbeta',
'step': 1e-5, 'limited': [0,0], 'limits': [30,50]},
{'fixed': 0, 'value': gammaguess, 'parname': 'gamma','step': 1e-15,
'limited': [1,1], 'limits': [0,20e-13]},
{'fixed': 0, 'value': deltaguess, 'parname': 'delta', 'step': 1e-1,
'limited': [1,1], 'limits': [0,2048]},
]
fa = {"x": pixels, "y": lambdas}
if error is not None:
fa["error"] = error
lsf = mpfit.mpfit(wavelength_residuals, parinfo=parinfo, functkw=fa,
quiet=1, maxiter=20)
return lsf
| 11,323 | 25.457944 | 80 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Filters.py |
'''
MOSFIRE Filter information
[YJHK]hpp : the half power points of the filters in Angstroms
'''
hpp = {
'Y': [9750.0, 11240.0],
'J': [11530.0, 13520.0],
'H': [14500.0, 18225.0],
'K': [19210.0, 24040.0]
}
| 253 | 13.111111 | 58 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/IO.py | '''
MOSFIRE Input/Output Utility Code
Written March 2, 2011 by npk
Provides tools to read fits files and parse their headers.
'''
try:
from astropy.io import fits as pf
except:
import pyfits as pf
import numpy as np
import unittest
import warnings
import re
import os
import pdb
import shutil
import ccdproc
import MOSFIRE
from MOSFIRE import CSU, Options
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
theBPM = None # the Bad pixel mask
from astropy import __version__ as av
if av in ['1.3', '1.3.0']:
err_str = 'Astropy {} is incompatible with MOSFIRE FITS files.'.format(av)
error(err_str)
error('Upgrade or downgrade astropy before running the MOSFIRE DRP.')
raise ImportError(err_str)
def badpixelmask():
global theBPM
path = Options.path_bpm
if theBPM is None:
hdulist = pf.open(path)
header = hdulist[0].header
theBPM = hdulist[0].data
hdulist.close()
return theBPM
def load_edges(maskname, band, options):
''' Load the slit edge functions. Returns (edges, metadata) '''
if False:
path = os.path.join(options["outdir"], maskname)
fn = os.path.join(path, "slit-edges_{0}.npy".format(band))
fn = "slit-edges_{0}.npy".format(band)
try:
edges = np.load(fn)
except:
error("Cannot load slit edges file")
raise Exception("Cannot load slit edges file")
edges,meta = edges[0:-1], edges[-1]
if meta['maskname'] != maskname:
warning("The maskname for the edge file '%s' does not match "
"that in the edge file '%s'" % (maskname, meta['maskname']))
warning("Continuing")
return edges, meta
def load_lambdacenter(fnum, maskname, options):
''' Load the wavelength coefficient functions '''
if False:
path = os.path.join(options["outdir"], maskname)
fn = os.path.join(path, "lambda_center_coeffs_{0}.npy".format(fnum))
fn = "lambda_center_coeffs_{0}.npy".format(fnum)
ld = np.load(fn)
return ld
def load_lambdadata(wavename, maskname, band, options):
''' Load the wavelength coefficient functions '''
if False:
fn = os.path.join(options["outdir"], maskname,
"lambda_coeffs_{0}.npy".format(wavename))
fn = "lambda_coeffs_{0}.npy".format(wavename)
ld = np.load(fn)
return ld
def load_lambdaoutwards(fnum, maskname, band, options):
''' Load the wavelength coefficient functions '''
if False:
path = os.path.join(options["outdir"], maskname)
fn = os.path.join(path, "lambda_outwards_coeffs_{0}.npy".format(fnum))
fn = "lambda_outwards_coeffs{0}.npy".format(fnum)
ld = np.load(fn)
return ld
def load_lambdamodel(fnum, maskname, band, options):
''' Load the wavelength coefficient functions '''
if False:
path = os.path.join(options["outdir"], maskname)
fn = os.path.join(path, "lambda_mask_coeffs_{0}.npy".format(fnum))
fn = "lambda_mask_coeffs_{0}.npy".format(fnum)
ld = np.load(fn)
return ld
def load_lambdaslit(fnum, maskname, band, options):
''' Load the wavelength coefficient functions '''
if False:
path = os.path.join(options["outdir"], maskname)
fn = os.path.join(path, "lambda_solution_{0}.fits".format(fnum))
fn = "lambda_solution_{0}.fits".format(fnum)
print(fn)
ret = readfits(fn, options)
if ret[0]['filter'] != band:
error ("Band name mismatch")
raise Exception("band name mismatch")
if ret[0]['maskname'] != maskname:
warning("The maskname for the edge file '%s' does not match "
"that in the edge file '%s'" % (maskname, ret[0]['maskname']))
warning("Continuing")
return readfits(fn, options)
def writefits(img, maskname, fname, options, header=None, bs=None,
overwrite=False, lossy_compress=False):
'''Convenience wrapper to write MOSFIRE drp-friendly FITS files
Args:
img: Data array to write to disk
maskname: Name of the science mask
fname: Full or relative path to output file
options: {} Unused
header: Optional, the header to write
bs: Optional unused
overwrite: Force overwrite of file, default False/No.
lossy_compress: Zero out the lowest order bits of the floats in
order to make FITS files amenable to compression. The loss is
at least 10 x less than 5e- which is the lowest reasonable read-
noise value.
Results:
Writes a file to fname with data img and header header.
'''
if lossy_compress:
hdu = pf.PrimaryHDU(floatcompress(img))
else:
hdu = pf.PrimaryHDU(img)
fn = fname
    # Use a real FITS Header when none is supplied so the .cards loop below works
    if header is None: header = pf.Header()
    header["DRPVER"] = (MOSFIRE.__version__, 'DRP Version Date')
warnings.filterwarnings('ignore')
if header is not None:
for k,value, comment in header.cards:
if k in hdu.header: continue
if k == 'COMMENT': continue
if k == '': continue
k = k.rstrip()
hdu.header[k] = (value,comment)
warnings.filterwarnings('always')
if overwrite:
try:
os.remove(fn)
debug("Removed old file '{0}'".format(fn))
except: pass
info("Wrote to '%s'" % (fn))
warnings.filterwarnings('ignore','Card is too long, comment will be truncated.')
hdu.writeto(fn)
warnings.filterwarnings('always')
if lossy_compress: os.system("gzip --force {0}".format(fn))
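# Illustrative sketch (not part of the original module): writing a small test
# frame with writefits. The mask name and output file name are made up; an
# empty astropy Header is passed so DRPVER is the only extra card added.
def _example_writefits():
    frame = np.zeros((16, 16), dtype=np.float32)
    writefits(frame, 'example_mask', 'example_output.fits', {},
              header=pf.Header(), overwrite=True, lossy_compress=False)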
def readfits(path, use_bpm=False):
'''Read a fits file from path and return a tuple of (header, data,
Target List, Science Slit List (SSL), Mechanical Slit List (MSL),
Alignment Slit List (ASL)).'''
if os.path.exists(path + ".gz"):
path = path + ".gz"
if not os.path.exists(path):
error("The file at path '%s' does not exist." % path)
raise Exception("The file at path '%s' does not exist." % path)
hdulist = pf.open(path)
header = hdulist[0].header
data = hdulist[0].data
datasec = ""
try:
datasec = header["DATASEC"]
debug("%s contains a DATASEC keyword not compatible with the pipeline" % path)
debug("The content of the keyword will be erased on the reduced data")
del header["DATASEC"]
except:
pass
if use_bpm:
theBPM = badpixelmask()
data = np.ma.masked_array(data, theBPM, fill_value=0)
hdulist.close()
return (header, data)
def readheader(path):
'''Reads a header (only) from a fits file'''
return pf.getheader(path)
def read_drpfits(maskname, fname, options):
'''Read a fits file written by the DRP'''
if os.path.exists(fname): path = fname
elif os.path.exists(fname + ".gz"): path = fname + ".gz"
else: path = os.path.join(fname_to_path(fname, options), fname)
if os.path.exists(path + ".gz"):
path = path + ".gz"
if not os.path.exists(path):
error("The file at path '%s' does not exist." % path)
raise Exception("The file at path '%s' does not exist." % path)
hdulist = pf.open(path)
output = []
for hdu in hdulist:
output.append(hdu.header)
if "DRPVER" in hdu.header:
itsver = hdu.header["DRPVER"]
if itsver != MOSFIRE.__version__:
error("The file requested '%s' uses DRP version %f "
"but the current DRP version is %f. There might be an "
"incompatibility" % (path, itsver, MOSFIRE.__version__))
raise Exception("The file requested '%s' uses DRP version %f "
"but the current DRP version is %f. There might be an "
"incompatibility" % (path, itsver, MOSFIRE.__version__))
else:
error("The file requested '%s' does not seem to be "
"the result of this DRP. This should never be the "
" case.")
raise Exception("The file requested '%s' does not seem to be "
"the result of this DRP. This should never be the "
" case.")
output.append(hdu.data)
hdulist.close()
return output
def fname_to_date_tuple(fname):
'''Take a filename like m120507_0123, return 12may07'''
months = {"01": "jan", "02": "feb", "03": "mar", "04": "apr", "05": "may",
"06": "jun", "07": "jul", "08": "aug", "09": "sep", "10": "oct",
"11": "nov", "12": "dec"}
if len(fname) != 17:
raise Exception("The file name '%s' is not of correct length. It "
"must be of the form mYYmmdd_nnnn.fits" % fname)
try:
fdate = fname.split("m")[1][0:6]
yr, mn, dy = "20" + fdate[0:2], fdate[2:4], int(fdate[4:6])
month = months[mn]
except:
warning("Could not parse date out of file name: %s" % (fname))
return yr, month, dy
def fname_to_path(fname, options):
'''Take a filename like m120507_0123, parse date, and return full path'''
if os.path.isabs(fname): return fname
yr, month, dy = fname_to_date_tuple(fname)
path = os.path.join(options["indir"], yr + month + "%2.2i" % dy)
if not os.path.exists(os.path.join(path, fname)):
path = os.path.join(options["indir"], yr + month + "%2.2i" % (dy-1))
if not os.path.exists(path):
error("Could not find file '%s' in '%s' out of parsed "
"%s, %s, %s" % (fname,
options["indir"], yr, month, dy))
raise Exception("Could not find file '%s' in '%s' out of parsed "
"%s, %s, %s" % (fname,
options["indir"], yr, month, dy))
return path
def list_file_to_strings(fname):
'''Read the filename in fname and convert to a series of paths.
    This emulates IRAF's @file system. However, in addition, the first line of the file
can be an absolute path. Example:
list.txt
/path/to/files
file1
file2
file3
returns ['/path/to/files/file1', '/path/to/files/file2', '/path/to/files/file3']
whereas
list.txt
file1
file2
file3
returns ['file1', 'file2', 'file3']
'''
filelist = fname
if type(fname) == str:
filelist = [fname]
if len(fname) == 0:
return []
if fname[0][-5:] == '.fits':
return fname
output = []
for fname in filelist:
debug( "Loading: %s" % fname)
inputs = np.genfromtxt(fname,dtype=str)
path = ""
start_index = 0
if len(inputs):
if os.path.isabs(inputs[0][0]):
path = inputs[0]
start_index = 1
for i in range(start_index, len(inputs)):
output.append(os.path.join(path, inputs[i]))
return output
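# Illustrative sketch (not part of the original module): building an @-style
# list file on the fly and expanding it with list_file_to_strings. The
# directory and frame names below are made up.
def _example_list_file_to_strings():
    import tempfile
    with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as f:
        f.write('/path/to/raw/data\n')      # optional absolute-path first line
        f.write('m120507_0123.fits\n')
        f.write('m120507_0124.fits\n')
    # Returns ['/path/to/raw/data/m120507_0123.fits', ...]
    return list_file_to_strings(f.name)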
def fix_long2pos_headers(filelist):
'''Fixes old long2pos observations which have a wrong set of keywords'''
files = list_file_to_strings(filelist)
# Print the filenames to Standard-out
info("Fixing long2pos headers for files in "+str(filelist))
# Iterate through files
for fname in files:
if os.path.isabs(fname): path = fname
else: path = os.path.join(fname_to_path(fname, options), fname)
hdulist = pf.open(path, mode='update')
header = hdulist[0].header
# determine if this file really needs to be updated (for example,
# prevents a second update of an already updated file
if 'long2pos' in header['MASKNAME'] and header['FRAMEID']=='object'\
and (header['PATTERN']=='long2pos' or header['PATTERN']=='Stare'):
info( "File "+str(fname)+" will be updated")
# make a copy of the original file
newname = path+".original"
info("copying ... "+str(path))
info("into ...... "+str(newname))
shutil.copyfile(path,newname)
if not os.path.exists(newname):
errstr = "Error in generating original file: '%s' does not exist"\
"(could not be created)." % newname
error(errstr)
raise Exception(errstr)
#updating header
# assign FRAMEID to narrow slits
if header['YOFFSET']==21 or header['YOFFSET']==-7:
header['FRAMEID']="B"
if header['YOFFSET']==-21 or header['YOFFSET']==7:
header['FRAMEID']="A"
# assign FRAMEID to wide slits
if header['YOFFSET']==14 or header['YOFFSET']==-14:
header['FRAMEID']="A"
#reverse sign of offsets for narrow slits
if header['YOFFSET']==-21:
header['YOFFSET']=7
if header['YOFFSET']==21:
header['YOFFSET']=-7
#transform Xoffset from pixels to arcseconds
header['XOFFSET'] = header['XOFFSET']*0.18
else:
info("File "+str(fname)+" does not need to be updated")
hdulist.flush()
hdulist.close()
def readmosfits(fname, options, extension=None):
'''Read a fits file written by MOSFIRE from path and return a tuple of
(header, data, Target List, Science Slit List (SSL), Mechanical Slit
List (MSL), Alignment Slit List (ASL)).
Note, the extension is typically not used, only used if the detector server
does not append slit extension.
'''
if os.path.isabs(fname): path = fname
else: path = os.path.join(fname_to_path(fname, options), fname)
hdulist = pf.open(path)
header = hdulist[0].header
data = hdulist[0].data
theBPM = badpixelmask()
data = np.ma.masked_array(data, theBPM)
if extension is not None:
hdulist = pf.open(extension)
try:
header = hdulist[0].header
datasec = ""
try:
datasec = header["DATASEC"]
debug("%s contains a DATASEC keyword not compatible with the pipeline" % path)
debug("The content of the keyword will be erased on the reduced data")
del header["DATASEC"]
except:
pass
targs = hdulist[1].data
ssl = hdulist[2].data
msl = hdulist[3].data
asl = hdulist[4].data
except:
error("Improper MOSFIRE FITS File: %s" % path)
raise Exception("Improper MOSFIRE FITS File: %s" % path)
# if np.abs(header["REGTMP1"] - 77) > 0.1:
# warning("**************************************")
# warning("The temperature of the detector is %3.3f where it "
# "should be 77.000 deg. Please notify Keck support staff." %
# header["REGTMP1"])
ssl = ssl[ssl.field("Slit_Number") != ' ']
msl = msl[msl.field("Slit_Number") != ' ']
asl = asl[asl.field("Slit_Number") != ' ']
# ELIMINATE POSITION B of the long2pos slit
ssl = ssl[ssl.field("Target_Name") != 'posB']
msl = msl[msl.field("Target_in_Slit") != 'posB']
asl = asl[asl.field("Target_in_Slit") != 'posBalign']
targs = targs[targs.field("Target_Name") !='posB']
targs = targs[targs.field("Target_Name") != "posBalign"]
bs = CSU.Barset()
bs.set_header(header, ssl=ssl, msl=msl, asl=asl, targs=targs)
hdulist.close()
return (header, data, bs)
def readscitbl(path):
print(path)
hdulist = pf.open(path)
header = hdulist[0].header
try:
targs = hdulist[1].data
ssl = hdulist[2].data
msl = hdulist[3].data
asl = hdulist[4].data
except:
warning("Improper MOSFIRE FITS File: %s" % path)
hdulist.close()
return header, targs, ssl, msl, asl
def parse_header_for_bars(header):
'''Parse {header} and convert to an array of CSU bar positions in mm. If
    the position is negative it means the barstat is not OK'''
poss = []
posfmt = "B%2.2iPOS"
statfmt = "B%2.2iSTAT"
for i in range(1,CSU.numbars+1):
p = posfmt % i
s = statfmt % i
pos = np.float32(header[p])
if (header[s] != 'OK') and (header[s] != 'SETUP'):
pos *= -1
poss.append(pos)
if len(poss) != CSU.numbars:
        error("Found %i bars instead of %i" %
            (len(poss), CSU.numbars))
        raise CSU.MismatchError("Found %i bars instead of %i" %
            (len(poss), CSU.numbars))
return np.array(poss)
def floatcompress(data, ndig=14):
'''Adapted from Finkbeiner IDL routine floatcompress'''
t = data.dtype
if not ((t == 'float32') or (t == 'float64')):
error("Only works on floating point numbers")
raise Exception("Only works on floating point numbers")
wzer = np.where(data == 0)
data[wzer] = 1.0
log2 = np.ceil(np.log(np.abs(data)) / np.log(2.0))
mant = np.round(data/2.0**(log2 - ndig))/2.0**ndig
out = mant*2.0**log2
out[wzer] = 0.0
return out
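# Illustrative sketch (not part of the original module): floatcompress keeps
# roughly ndig significant bits per pixel so that the gzipped FITS file
# compresses far better, at a precision loss well below the detector read
# noise. The frame below is synthetic.
def _example_floatcompress():
    frame = np.random.normal(1000.0, 5.0, size=(32, 32)).astype(np.float32)
    squashed = floatcompress(frame.copy(), ndig=14)
    return np.max(np.abs(squashed - frame))   # tiny compared to the read noise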
def imarith(operand1, op, operand2, result):
info( "%s %s %s -> %s" % (operand1, op, operand2, result))
assert type(operand1) == str
assert type(operand2) == str
assert os.path.exists(operand1)
assert os.path.exists(operand2)
assert op in ['+', '-']
## Strip off the [0] part of the operand as we are assuming that we are
## operating on the 0th FITS HDU.
    if re.match(r'(\w+\.fits)\[0\]', operand1):
        operand1 = operand1[:-3]
    if re.match(r'(\w+\.fits)\[0\]', operand2):
        operand2 = operand2[:-3]
import operator
operation = { "+": operator.add, "-": operator.sub,\
"*": operator.mul, "/": operator.truediv}
hdulist1 = pf.open(operand1, 'readonly')
hdulist2 = pf.open(operand2, 'readonly')
data = operation[op](hdulist1[0].data, hdulist2[0].data)
header = hdulist1[0].header
header['history'] = 'imarith {} {} {}'.format(operand1, op, operand2)
header['history'] = 'Header values copied from {}'.format(operand1)
if 'EXPTIME' in hdulist1[0].header and\
'EXPTIME' in hdulist2[0].header and\
       op in ['+', '-']:
exptime = operation[op](float(hdulist1[0].header['EXPTIME']),\
float(hdulist2[0].header['EXPTIME']))
header['history'] = 'Other than exposure time which was edited'
header['EXPTIME'] = exptime
hdu = pf.PrimaryHDU(data=data, header=header)
hdu.writeto(result)
hdulist1.close()
hdulist2.close()
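# Illustrative sketch (not part of the original module): differencing two small
# FITS frames with imarith. The file names are made up and written to the
# current working directory; the result file must not already exist.
def _example_imarith():
    for name, level in [('example_a.fits', 3.0), ('example_b.fits', 1.0)]:
        pf.PrimaryHDU(np.ones((8, 8)) * level).writeto(name, overwrite=True)
    imarith('example_a.fits', '-', 'example_b.fits', 'example_diff.fits')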
def imcombine(filelist, out, options, method="average", reject="none",\
lsigma=3, hsigma=3, mclip=False,\
nlow=None, nhigh=None):
'''Combines images in input list with optional rejection algorithms.
Args:
filelist: The list of files to imcombine
out: The full path to the output file
method: either "average" or "median" combine
options: Options dictionary
bpmask: The full path to the bad pixel mask
reject: none, minmax, sigclip
nlow,nhigh: Parameters for minmax rejection, see iraf docs
mclip: use median as the function to calculate the baseline values for
sigclip rejection?
lsigma, hsigma: low and high sigma rejection thresholds.
Returns:
None
Side effects:
Creates the imcombined file at location `out'
'''
assert method in ['average', 'median']
if os.path.exists(out):
os.remove(out)
if reject == 'none':
info('Combining files using ccdproc.combine task')
info(' reject=none')
for file in filelist:
debug(' Combining: {}'.format(file))
ccdproc.combine(filelist, out, method=method,\
minmax_clip=False,\
iraf_minmax_clip=True,\
sigma_clip=False,\
unit="adu")
info(' Done.')
elif reject == 'minmax':
## The IRAF imcombine minmax rejection behavior is different than the
## ccdproc minmax rejection behavior. We are using the IRAF like
## behavior here. To support this a pull request for the ccdproc
## package has been made:
## https://github.com/astropy/ccdproc/pull/358
##
## Note that the ccdproc behavior still differs slightly from the
## nominal IRAF behavior in that the rejection does not consider whether
## any of the rejected pixels have been rejected for other reasons, so
## if nhigh=1 and that pixel was masked for some other reason, the
## new ccdproc algorithm, will not mask the next highest pixel, it will
## still just mask the highest pixel even if it is already masked.
##
## From IRAF (help imcombine):
## nlow = 1, nhigh = 1 (minmax)
## The number of low and high pixels to be rejected by the
## "minmax" algorithm. These numbers are converted to fractions
## of the total number of input images so that if no rejections
## have taken place the specified number of pixels are rejected
## while if pixels have been rejected by masking, thresholding, or
## non-overlap, then the fraction of the remaining pixels,
## truncated to an integer, is used.
##
## Check that minmax rejection is possible given the number of images
if nlow is None:
nlow = 0
if nhigh is None:
nhigh = 0
if nlow + nhigh >= len(filelist):
warning('nlow + nhigh >= number of input images. Combining without rejection')
nlow = 0
nhigh = 0
if ccdproc.version.major >= 1 and ccdproc.version.minor >= 1\
and ccdproc.version.release:
info('Combining files using ccdproc.combine task')
info(' reject=clip_extrema')
info(' nlow={}'.format(nlow))
info(' nhigh={}'.format(nhigh))
for file in filelist:
info(' {}'.format(file))
ccdproc.combine(filelist, out, method=method,\
minmax_clip=False,\
clip_extrema=True,\
nlow=nlow, nhigh=nhigh,\
sigma_clip=False,\
unit="adu")
info(' Done.')
else:
## If ccdproc does not have new rejection algorithm in:
## https://github.com/astropy/ccdproc/pull/358
## Manually perform rejection using ccdproc.combiner.Combiner object
info('Combining files using local clip_extrema rejection algorithm')
info('and the ccdproc.combiner.Combiner object.')
info(' reject=clip_extrema')
info(' nlow={}'.format(nlow))
info(' nhigh={}'.format(nhigh))
for file in filelist:
info(' {}'.format(file))
ccdlist = []
for file in filelist:
ccdlist.append(ccdproc.CCDData.read(file, unit='adu', hdu=0))
c = ccdproc.combiner.Combiner(ccdlist)
nimages, nx, ny = c.data_arr.mask.shape
argsorted = np.argsort(c.data_arr.data, axis=0)
mg = np.mgrid[0:nx,0:ny]
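            # argsorted[i, :, :] holds, for each pixel, the index of the image
            # with the i-th smallest value, so negative i selects the highest
            # values: masking indices -nhigh..-1 and 0..nlow-1 rejects the
            # nhigh highest and nlow lowest samples at every pixel.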
for i in range(-1*nhigh, nlow):
where = (argsorted[i,:,:].ravel(),
mg[0].ravel(),
mg[1].ravel())
c.data_arr.mask[where] = True
if method == 'average':
result = c.average_combine()
elif method == 'median':
result = c.median_combine()
for key in list(ccdlist[0].header.keys()):
header_entry = ccdlist[0].header[key]
if key != 'COMMENT':
result.header[key] = (header_entry,
ccdlist[0].header.comments[key])
hdul = result.to_hdu()
# print(hdul)
# for hdu in hdul:
# print(type(hdu.data))
hdul[0].writeto(out)
# result.write(out)
info(' Done.')
elif reject == 'sigclip':
info('Combining files using ccdproc.combine task')
info(' reject=sigclip')
info(' mclip={}'.format(mclip))
info(' lsigma={}'.format(lsigma))
info(' hsigma={}'.format(hsigma))
baseline_func = {False: np.mean, True: np.median}
ccdproc.combine(filelist, out, method=method,\
minmax_clip=False,\
clip_extrema=False,\
sigma_clip=True,\
sigma_clip_low_thresh=lsigma,\
sigma_clip_high_thresh=hsigma,\
sigma_clip_func=baseline_func[mclip],\
sigma_clip_dev_func=np.std,\
)
info(' Done.')
else:
raise NotImplementedError('{} rejection unrecognized by MOSFIRE DRP'.format(reject))
class TestIOFunctions(unittest.TestCase):
def setUp(self):
pass
def test_readfits(self):
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
| 25,201 | 32.873656 | 92 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Detector.py | '''
MOSFIRE Detector Code
File contains values associated with the Hawaii-2RG detector.
pixelsize of 18 micron is provided by Teledyne.
'''
import numpy as np
mm = 1.0
pixelsize = 0.018 * mm
npix = (2048, 2048)
gain = 2.15 # From MOSFIRE pre ship review pg. 125
RN = 21.0
| 281 | 12.428571 | 61 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Combine.py |
import os
import pdb
import numpy as np
try:
from astropy.io import fits as pf
except:
import pyfits as pf
import MOSFIRE
from MOSFIRE import Background, IO, Wavelength
def imcombine(filelist, maskname, fname, options, sum_type):
''' combine the images in file list into fname.
Sum type:
rate -- filelist is in cnt/s
ivar-rate -- filelist is in s/cnt
snr-rate -- filelist is in SNR
'''
ARR = None
hdr = None
i = 1
itime = 0
for file in filelist:
this_hdr, img = IO.readfits(file)
        cards = this_hdr.cards
thisitime = this_hdr['truitime']
itime += thisitime
if ARR is None: ARR = np.zeros(img.shape)
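        # Accumulate exposure-time weighted sums: 'rate' and 'snr-rate' inputs
        # are multiplied by their integration time here and divided by the
        # total itime below, while 'ivar-rate' accumulates thisitime/img and
        # is converted back with itime/ARR at the end.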
if sum_type == 'rate': ARR += img * thisitime
if sum_type == 'ivar-rate': ARR += thisitime/img
if sum_type == 'snr-rate': ARR += img * thisitime
if hdr is None:
hdr = this_hdr
hdr["fno%2.2i" % i] = (file, "--")
for card in cards:
            key, value, comment = (card.keyword, card.value, card.comment)
            if key in hdr and hdr[key] != value:
key = key + ("_img%2.2i" % i)
if len(key) > 8: key = 'HIERARCH ' + key
try:
hdr[key] = (value, comment)
except ValueError:
pass
hdr['itime'] = (itime, 'Itime for %i rectified images' % len(filelist))
if sum_type == 'rate': ARR /= itime
if sum_type == 'ivar-rate': ARR = itime/ARR
if sum_type == 'snr-rate': ARR /= itime
IO.writefits(ARR, maskname, fname, options, header=hdr, overwrite=True,
lossy_compress=True)
def get_path(a):
if os.path.exists(a): return a
a += ".gz"
if os.path.exists(a): return a
raise Exception("No such path: %s" % a)
def gz(name):
if name[-2:] == 'gz': return '.gz'
else: return ''
def stack_rectified(wavenames, maskname, band, wavops):
N = len(wavenames)
lamnames = []
suffixs = []
    for i in range(N):
lamnames.append( Wavelength.filelist_to_wavename(wavenames[i], band,
maskname, wavops).rstrip(".fits"))
suffixs.append(lamnames[i].lstrip("wave_stack_%s_" % band))
path = os.path.join(wavops["outdir"], maskname)
recs = []
ivars = []
sns = []
try:
ls = [get_path(os.path.join(path, "eps_%s_%s_%s.fits") % (maskname,
suffix, band)) for suffix in suffixs]
imcombine(ls, maskname, "eps_%s_%s.fits" % (maskname, band),
wavops, 'rate')
except:
pass
try:
ls = [get_path(os.path.join(path, "ivars_%s_%s_%s.fits") % (maskname,
suffix, band)) for suffix in suffixs]
imcombine(ls, maskname, "ivars_%s_%s.fits" % (maskname,
band), wavops, 'ivar-rate')
except:
pass
try:
ls = [get_path(os.path.join(path, "snrs_%s_%s_%s.fits") % (maskname,
suffix, band)) for suffix in suffixs]
imcombine(ls, maskname, "snrs_%s_%s.fits" % (maskname, band),
wavops, 'snr-rate')
except:
pass
def stack_slits(wavenames, maskname, band, wavops):
pass
def stack_full(wavenames, maskname, band, wavops):
pass
def stack_files(wavenames, maskname, band, wavops):
stack_rectified(wavenames, maskname, band, wavops)
stack_slits(wavenames, maskname, band, wavops)
stack_full(wavenames, maskname, band, wavops)
def rename_files(wavenames, maskname, band, wavops):
lamname = Wavelength.filelist_to_wavename(wavenames[0], band, maskname,
wavops).rstrip(".fits")
suffix = lamname.lstrip("wave_stack_%s_" % band)
path = os.path.join(wavops["outdir"], maskname)
fnames = ["rectified_%s%s.fits", "rectified_ivar_%s%s.fits",
"rectified_sn_%s%s.fits"]
for fname in fnames:
try:
a = get_path(os.path.join(path, fname % (band, "_" + suffix)))
b = os.path.join(path, fname % (band, "")) + gz(a)
os.rename(a, b)
except:
print("Ignoring renaming of: ", fname)
pass
edges = IO.load_edges(maskname, band, wavops)
n_slits = len(edges[0])
    for i in range(1, n_slits+1):
S = "S%2.2i" % (i)
a = get_path(os.path.join(path,
"eps_%s_%s_%s.fits" % (band, suffix, S)))
a_h = pf.open(a)[0].header
obj = a_h['object']
b = os.path.join(path, "%s_%s_%s_eps.fits" % (maskname, band, obj)) + \
gz(a)
os.rename(a,b)
a = get_path(os.path.join(path,
"ivar_%s_%s_%s.fits" % (band, suffix, S)))
a_h = pf.open(a)[0].header
obj = a_h['object']
b = os.path.join(path, "%s_%s_%s_ivar.fits" % (maskname, band, obj)) + \
gz(a)
os.rename(a,b)
a = get_path(os.path.join(path,
"eps_%s_%s_%s.fits" % (maskname, suffix, band)))
b = os.path.join(path,
"%s_%s_eps.fits" % (maskname, band)) + gz(a)
os.rename(a,b)
a = get_path(os.path.join(path,
"snrs_%s_%s_%s.fits" % (maskname, suffix, band)))
b = os.path.join(path,
"%s_%s_snrs.fits" % (maskname, band)) + gz(a)
os.rename(a, b)
a = get_path(os.path.join(path,
"ivars_%s_%s_%s.fits" % (maskname, suffix, band)))
b = os.path.join(path,
"%s_%s_ivars.fits" % (maskname, band)) + gz(a)
os.rename(a, b)
def handle_combine(wavenames, maskname, band, wavops):
N = len(wavenames)
assert(N > 0)
print("Starting")
if N == 1: rename_files(wavenames, maskname, band, wavops)
if N > 1: stack_files(wavenames, maskname, band, wavops)
| 5,698 | 25.755869 | 80 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Bspline.py | '''
Convenience functions for Bspline
'''
import numpy as np
try:
from astropy.io import fits as pf
except:
import pyfits as pf
from MOSFIRE import CSU
import unittest
def array_to_tuple(A):
xs = []
ys = []
zs = []
for x in np.arange(A.shape[0]):
for y in np.arange(A.shape[1]):
xs.append(x)
ys.append(y)
zs.append(A[x,y])
return list(map(np.array, (xs, ys, zs)))
class TestBsplineFunctions(unittest.TestCase):
def setUp(self):
pass
def test_array_to_tuple(self):
sa = self.assertTrue
A = np.array([[1,2], [3,4]])
(xs,ys,zs) = array_to_tuple(A)
sa(zs[0] == 1)
sa(zs[1] == 2)
sa(zs[2] == 3)
sa(zs[3] == 4)
for cnt in range(len(xs)):
i = xs[cnt]
j = ys[cnt]
sa(A[i, j] == zs[cnt])
cnt += 1
if __name__ == '__main__':
unittest.main()
| 1,134 | 19.636364 | 48 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Flats.py | '''
===================
MOSFIRE Flat Fields
===================
npk April 14th 2011
Modifications:
2013-08-29: T. Do - added 'edgeThreshold' keyword as well as changed the initial guess for the edge location when using long slits
2014-01-09: MK - Functions added to subtract dome "lamps off/thermal" flats from the dome "lamps on" flats.
The functions combine both sets of flats using your current method to create a lamps on and
lamps off flat, and then subtracts those two images to remove the contribution of the dome
emission. Files renamed combflat_lamps_on* and combflat_lamps_off*. The final flat has the
same name that you output: combflat_2s_band_.fits .
To reduce the thermal flats, the flat functions have an optional keyword for the
"lampOffList" that acts as the reduction trigger. The driver file should include a call
like the example below.
Flats.handle_flats('Flat.txt', maskname, band, flatops, lampOffList='FlatThermal.txt')
2014-04-27: MK - Nick released a new version of the DRP in March. I merged the differences between the code
that I downloaded March 14 (Nick K.'s updated code to use file lists) with the code we
developed to use the thermal flatts.
2014-06-12: MK - Nick released a new version of the DRP in June 2013. I merged the differences between the code
modified on April 27 and the 10 June release. Tested with K-band data and appears to work
as expected.
'''
import os
import time
import unittest
import numpy as np
from matplotlib import pyplot as pl
import scipy, scipy.ndimage
try:
from astropy.io import fits as pf
except:
import pyfits as pf
import pdb
import MOSFIRE
from MOSFIRE import Fit, IO, Options, CSU, Wavelength, Filters, Detector
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
__version__ = 0.1
#from IPython.Shell import IPShellEmbed
#start_shell = IPShellEmbed()
def handle_flats(flatlist, maskname, band, options, extension=None,edgeThreshold=450,lampOffList=None,longslit=None):
'''
handle_flats is the primary entry point to the Flats module.
handle_flats takes a list of individual exposure FITS files and creates:
1. A CRR, dark subtracted, pixel-response flat file.
2. A set of polynomials that mark the edges of a slit
Inputs:
        flatlist: A list of flat-field frames, or a text file listing them
        maskname: The name of a mask
        band: A string indicating the band
Outputs:
file {maskname}/flat_2d_{band}.fits -- pixel response flat
file {maskname}/edges.np
'''
tick = time.time()
# Check
bpos = np.ones(92) * -1
#Retrieve the list of files to use for flat creation.
flatlist = IO.list_file_to_strings(flatlist)
if len(flatlist) == 0:
print('WARNING: No flat files found.')
raise IOError('No flat files found')
# Print the filenames to Standard-out
for flat in flatlist:
debug(str(flat))
#Determine if flat files headers are in agreement
for fname in flatlist:
hdr, dat, bs = IO.readmosfits(fname, options, extension=extension)
try: bs0
except: bs0 = bs
if np.any(bs0.pos != bs.pos):
print("bs0: "+str(bs0.pos)+" bs: "+str(bs.pos))
            error("Barsets do not seem to match")
raise Exception("Barsets do not seem to match")
if hdr["filter"] != band:
error ("Filter name %s does not match header filter name "
"%s in file %s" % (band, hdr["filter"], fname))
raise Exception("Filter name %s does not match header filter name "
"%s in file %s" % (band, hdr["filter"], fname))
for i in range(len(bpos)):
b = hdr["B{0:02d}POS".format(i+1)]
if bpos[i] == -1:
bpos[i] = b
else:
if bpos[i] != b:
error("Bar positions are not all the same in "
"this set of flat files")
raise Exception("Bar positions are not all the same in "
"this set of flat files")
bs = bs0
# Imcombine the lamps ON flats
info("Attempting to combine files in {}".format(flatlist))
out = os.path.join("combflat_2d_{:s}.fits".format(band))
IO.imcombine(flatlist, out, options, reject="minmax", nlow=1, nhigh=1)
# Imcombine the lamps OFF flats and subtract the off from the On sets
if lampOffList != None:
#Retrieve the list of files to use for flat creation.
info("*********** Attempting to combine Lamps off files in {}".format(lampOffList))
lampOffList = IO.list_file_to_strings(lampOffList)
for flat in lampOffList:
debug(str(flat))
out = os.path.join("combflat_lamps_off_2d_{:s}.fits".format(band))
IO.imcombine(lampOffList, out, options, reject="minmax", nlow=1, nhigh=1)
file_on = os.path.join("combflat_2d_{:s}.fits".format(band))
file_off = os.path.join("combflat_lamps_off_2d_{:s}.fits".format(band))
file_on_save = os.path.join("combflat_lamps_on_2d_{:s}.fits".format(band))
IO.imarith(file_on, '-', file_off, file_on_save)
debug("Combined '%s' to '%s'" % (flatlist, maskname))
# info("Combined flats for '%s'" % (maskname))
path = "combflat_2d_%s.fits" % band
if lampOffList != None:
debug("Using on-off flat for K band")
path = os.path.join("combflat_lamps_on_2d_{:s}.fits".format(band))
(header, data) = IO.readfits(path, use_bpm=True)
info("Flat written to %s" % path)
# Edge Trace
if bs.long_slit:
info( "Long slit mode recognized")
info( "Central row position: "+str(longslit["row_position"]))
info( "Upper and lower limits: "+str(longslit["yrange"][0])+" "+str(longslit["yrange"][1]))
results = find_longslit_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit)
elif bs.long2pos_slit:
info( "Long2pos mode recognized")
results = find_long2pos_edges(data,header, bs, options, edgeThreshold=edgeThreshold, longslit=longslit)
else:
info('Finding slit edges in {}'.format(path))
results = find_and_fit_edges(data, header, bs, options,edgeThreshold=edgeThreshold)
results[-1]["maskname"] = maskname
results[-1]["band"] = band
np.save("slit-edges_{0}".format(band), results)
save_ds9_edges(results, options)
# Generate Flat
out = "pixelflat_2d_%s.fits" % (band)
if lampOffList != None:
make_pixel_flat(data, results, options, out, flatlist, lampsOff=True)
else:
make_pixel_flat(data, results, options, out, flatlist, lampsOff=False)
info( "Pixel flat took {0:6.4} s".format(time.time()-tick))
def make_pixel_flat(data, results, options, outfile, inputs, lampsOff=None):
'''
Convert a flat image into a flat field
'''
def pixel_min(y): return int(np.floor(np.min(y)))
def pixel_max(y): return int(np.ceil(np.max(y)))
def collapse_flat_box(dat):
'''Collapse data to the spectral axis (0)'''
v = np.median(dat, axis=0).ravel()
return v
flat = np.ones(shape=Detector.npix)
hdu = pf.PrimaryHDU((data/flat).astype(np.float32))
hdu.header.set("version", __version__, "DRP version")
i = 0
for flatname in inputs:
nm = flatname.split("/")[-1]
hdu.header.set("infile%2.2i" % i, nm)
i += 1
slitno = 0
for result in results[0:-1]:
slitno += 1
        # There seems to be a bit of an issue with this on Longslits, where it is being read as bytes not as str
try:
hdu.header.set("targ%2.2i" % slitno, result["Target_Name"])
except ValueError: hdu.header.set("targ%2.2i" % slitno, str(result["Target_Name"], 'utf-8'))
bf = result["bottom"]
tf = result["top"]
try:
hpps = result["hpps"]
except:
error( "No half power points for this slit")
hpps = [0, Detector.npix[0]]
xs = np.arange(hpps[0], hpps[1])
top = pixel_min(tf(xs))
bottom = pixel_max(bf(xs))
hdu.header.set("top%2.2i" % slitno, top)
hdu.header.set("bottom%2.2i" % slitno, bottom)
info( "%s] Bounding top/bottom: %i/%i" % (result["Target_Name"],
bottom, top))
v = collapse_flat_box(data[bottom:top,hpps[0]:hpps[1]])
x2048 = np.arange(Options.npix)
v = np.poly1d(np.polyfit(xs,v,
options['flat-field-order']))(xs).ravel()
for i in np.arange(bottom-1, top):
flat[i,hpps[0]:hpps[1]] = v
info("Producing Pixel Flat...")
for r in range(len(results)-1):
theslit = results[r]
try:
bf = theslit["bottom"]
tf = theslit["top"]
except:
pdb.set_trace()
for i in range(hpps[0], hpps[1]):
top = int(np.floor(tf(i)))
bottom = int(np.ceil(bf(i)))
data[top:bottom, i] = flat[top:bottom,i]
hdu.data = (data/flat).astype(np.float32)
bad = np.abs(hdu.data-1.0) > 0.5
hdu.data[bad] = 1.0
hdu.data = hdu.data.filled(1)
if os.path.exists(outfile):
os.remove(outfile)
hdu.writeto(outfile)
info("Done.")
def save_ds9_edges(results, options):
'''
Create a ds9 file that saves the fit slit edge positions determined
by find_and_fit_edges
'''
ds9 = ''
W = Options.npix
delt = Options.npix/30.
S = 1
for i in range(len(results) - 1):
res = results[i]
top = res["top"]
bottom = res["bottom"]
for i in np.arange(W/delt):
x = delt * i
sx = x + 1
ex = x + delt + 1
sy = top(sx)
ey = top(ex)
smid = (top(sx) - bottom(sx)) / 2. + bottom(sx)
emid = (top(ex) - bottom(ex)) / 2. + bottom(sx)
# three quarter point
stq = (top(sx) - bottom(sx)) * 3./4. + bottom(sx)
etq = (top(ex) - bottom(ex)) * 3./4. + bottom(sx)
# one quarter point
soq = (top(sx) - bottom(sx)) * 1./4. + bottom(sx)
eoq = (top(ex) - bottom(ex)) * 1./4. + bottom(sx)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, smid, ex, emid)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, stq, ex, etq)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=red\n" % (sx, soq, ex, eoq)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)
if i == W//2:
ds9 += " # text={S%2.0i (%s)}" % (S,
res["Target_Name"])
ds9 += "\n"
sy = bottom(sx) + 1
ey = bottom(ex) + 1
if i == 10: txt=res["Target_Name"]
else: txt=""
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0 color=blue text={%s}\n" % (sx, sy, ex, ey, txt)
# Vertical line indicating half power points
try:
hpps = res["hpps"]
sx = hpps[0] ; ex = hpps[0]
sy = bottom(sx) ; ey = top(sx)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)
sx = hpps[1] ; ex = hpps[1]
sy = bottom(sx) ; ey = top(sx)
ds9 += "line(%f, %f, %f, %f) # fixed=1 edit=0 move=0 rotate=0 delete=0\n" % (sx, sy, ex, ey)
except:
continue
band = results[-1]["band"]
fn = "slit-edges_%s.reg" % band
try:
f = open(fn,'w')
f.write(ds9)
f.close()
except IOError:
error("IO Error")
raise
except:
raise
def find_edge_pair(data, y, roi_width, edgeThreshold=450):
'''
find_edge_pair finds the edge of a slit pair in a flat
data[2048x2048]: a well illuminated flat field [DN]
y: guess of slit edge position [pix]
Keywords:
edgeThreshold: the pixel value below which we should ignore using
to calculate edges.
Moves along the edge of a slit image
- At each location along the slit edge, determines
the position of the demarcations between two slits
Outputs:
xposs []: Array of x positions along the slit edge [pix]
yposs []: The fitted y positions of the "top" edge of the slit [pix]
widths []: The fitted delta from the top edge of the bottom [pix]
scatters []: The amount of light between slits
The procedure is as follows
1: starting from a guess spatial position (parameter y), march
along the spectral direction in some chunk of pixels
2: At each spectral location, construct a cross cut across the
spatial direction; select_roi is used for this.
3: Fit a two-sided error function Fit.residual_disjoint_pair
on the vertical cross cut derived in step 2.
4: If the fit fails, store it in the missing list
- else if the top fit is good, store the top values in top vector
- else if the bottom fit is good, store the bottom values in bottom
vector.
5: In the vertical cross-cut, there is a minimum value. This minimum
value is stored as a measure of scattered light.
Another procedure is used to fit polynomials to these fitted values.
'''
def select_roi(data, roi_width):
v = data[int(y-roi_width):int(y+roi_width), int(xp)-2:int(xp)+2]
v = np.median(v, axis=1) # Axis = 1 is spatial direction
return v
xposs_top = []
yposs_top = []
xposs_top_missing = []
xposs_bot = []
yposs_bot = []
xposs_bot_missing = []
yposs_bot_scatters = []
#1
rng = np.linspace(10, 2040, 50).astype(np.int)
for i in rng:
xp = i
#2
v = select_roi(data, roi_width)
xs = np.arange(len(v))
# Modified from 450 as the hard coded threshold to one that
# can be controlled by a keyword
if (np.median(v) < edgeThreshold):
xposs_top_missing.append(xp)
xposs_bot_missing.append(xp)
continue
#3
ff = Fit.do_fit(v, residual_fun=Fit.residual_disjoint_pair)
fit_ok = 0 < ff[4] < 4
if fit_ok:
(sigma, offset, mult1, mult2, add, width) = ff[0]
xposs_top.append(xp)
yposs_top.append(y - roi_width + offset + width)
xposs_bot.append(xp)
yposs_bot.append(y - roi_width + offset)
between = offset + width//2
if 0 < between < len(v)-1:
start = int(np.max([0, between-2]))
stop = int(np.min([len(v),between+2]))
yposs_bot_scatters.append(np.min(v[start:stop])) # 5
if False:
pl.figure(2)
pl.clf()
tmppix = np.arange(y-roi_width, y+roi_width)
tmpx = np.arange(len(v))
pl.axvline(y - roi_width + offset + width, color='red')
pl.axvline(y - roi_width + offset, color='red')
pl.scatter(tmppix, v)
pl.plot(tmppix, Fit.fit_disjoint_pair(ff[0], tmpx))
pl.axhline(yposs_bot_scatters[-1])
pl.draw()
else:
yposs_bot_scatters.append(np.nan)
else:
xposs_bot_missing.append(xp)
xposs_top_missing.append(xp)
info("Skipping wavelength pixel): %i" % (xp))
return list(map(np.array, (xposs_bot, xposs_bot_missing, yposs_bot, xposs_top,
xposs_top_missing, yposs_top, yposs_bot_scatters)))
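# Illustrative sketch (not part of the original module): calling find_edge_pair
# on a synthetic flat that is bright everywhere except for a dark dead band
# around row 1000 (the gap between two adjacent slits). It is meant only to
# show the call signature and the arrays that come back.
def _example_find_edge_pair():
    flat = np.ones((2048, 2048)) * 1000.0
    flat[1000:1006, :] = 10.0       # the dead band between two slits
    (xposs_bot, xposs_bot_missing, yposs_bot, xposs_top,
     xposs_top_missing, yposs_top, scatters) = find_edge_pair(
        flat, 1003, 20, edgeThreshold=450)
    return xposs_top, yposs_top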
def fit_edge_poly(xposs, xposs_missing, yposs, order):
'''
fit_edge_poly fits a polynomial to the measured slit edges.
This polynomial is used to extract spectra.
fit_edge_poly computes a parabola, and fills in missing data with a
parabola
input-
xposs, yposs [N]: The x and y positions of the slit edge [pix]
order: the polynomial order
'''
# First fit low order polynomial to fill in missing data
fun = np.poly1d(Fit.polyfit_clip(xposs, yposs, 2))
xposs = np.append(xposs, xposs_missing)
yposs = np.append(yposs, fun(xposs_missing))
# Remove any fits that deviate wildly from the 2nd order polynomial
ok = np.abs(yposs - fun(xposs)) < 1
if not ok.any():
error("Flat is not well illuminated? Cannot find edges")
raise Exception("Flat is not well illuminated? Cannot find edges")
# Now refit to user requested order
fun = np.poly1d(Fit.polyfit_clip(xposs[ok], yposs[ok], order))
yposs_ok = yposs[ok]
res = fun(xposs[ok]) - yposs[ok]
sd = np.std(res)
ok = np.abs(res) < 2*sd
    # Check to see if the slit edge function is sane,
# if it's not, then we fix it.
pix = np.arange(2048)
V = fun(pix)
if np.abs(V.max() - V.min()) > 10:
info ("Forcing a horizontal slit edge")
print("Forcing a horizontal slit edge")
tmp = yposs_ok[ok]
fun = np.poly1d(np.median(tmp))
#fun = np.poly1d(np.median(yposs[ok]))
return (fun, res, sd, ok)
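# Illustrative sketch (not part of the original module): fitting a gently
# sloped slit edge with fit_edge_poly, with two dropped (e.g. vignetted)
# columns filled back in from the low-order fit. All numbers are made up.
def _example_fit_edge_poly():
    xposs = np.linspace(10.0, 2000.0, 40)
    yposs = 1000.0 + 1e-3 * xposs + 5e-8 * xposs ** 2
    xposs_missing = np.array([600.0, 700.0])
    fun, res, sd, ok = fit_edge_poly(xposs, xposs_missing, yposs, 4)
    return fun(1024.0)   # edge position at the detector centre column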
def find_long2pos_edges(data, header, bs, options, edgeThreshold=450,longslit=None):
y = 2034
DY = 44.25
toc = 0
ssl = bs.ssl
slits = []
top = [0., np.float(Options.npix)]
start_slit_num = int(bs.msl[0]['Slit_Number'])-1
if start_slit_num > 0:
y -= DY * start_slit_num
# if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
if bs.long_slit:
try:
y=longslit["yrange"][1]
except:
error ("Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y))
print("Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y))
# Count and check that the # of objects in the SSL matches that of the MSL
# This is purely a safety check
numslits = np.zeros(len(ssl))
for i in range(len(ssl)):
slit = ssl[i]
M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])
numslits[i] = len(M[0])
numslits = np.array(numslits)
info("Number of slits allocated for this longslit: "+str(np.sum(numslits)))
# now begin steps outline above
results = []
for slit in [0,1]:
result = {}
result["Target_Name"] = ssl[slit]["Target_Name"]
# 1 Defines a polynomial of degree 0, which is a constant, with the value of the top of the slit
result["top"] = np.poly1d([longslit["yrange"][slit][1]]) # 1
        topfun = np.poly1d([longslit["yrange"][slit][1]]) # this is a constant function with c=top of the slit # 1
        botfun = np.poly1d([longslit["yrange"][slit][0]]) # this is a constant function with c=bottom of the slit # 0
# xposs_top_this = [10 110 210 .... 1810 1910]
xposs_top = np.arange(10,2000,100)
xposs_bot = np.arange(10,2000,100)
        # yposs_top_this = [1104 1104 ... 1104 1104], it's the constant polynomial evaluated at the X positions
yposs_top = topfun(xposs_top)
yposs_bot = botfun(xposs_bot)
''' Deal with the current slit '''
target=slit
hpps = Wavelength.estimate_half_power_points(
bs.scislit_to_csuslit(target+1)[0], header, bs)
ok = np.where((xposs_top > hpps[0]) & (xposs_top < hpps[1]))
xposs_bot = xposs_bot[ok]
yposs_bot = yposs_bot[ok]
xposs_top = xposs_top[ok]
yposs_top = yposs_top[ok]
if len(xposs_bot) == 0:
error ("The slit edges specifications appear to be incorrect.")
raise Exception("The slit edges specifications appear to be incorrect.")
        # bot is the polynomial that defines the shape of the bottom of the slit. In this case, we set it to a constant.
bot = botfun.c.copy()
top = topfun.c.copy()
#4
result = {}
result["Target_Name"] = ssl[target]["Target_Name"]
result["xposs_top"] = xposs_top
result["yposs_top"] = yposs_top
result["xposs_bot"] = xposs_bot
result["yposs_bot"] = yposs_bot
result["top"] = np.poly1d(top)
result["bottom"] = np.poly1d(bot)
result["hpps"] = hpps
result["ok"] = ok
results.append(result)
#print("And the top is"+str(result["top"]))
results.append({"version": options["version"]})
return results
def find_longslit_edges(data, header, bs, options, edgeThreshold=450,longslit=None):
y = 2034
DY = 44.25
toc = 0
ssl = bs.ssl
slits = []
top = [0., np.float(Options.npix)]
start_slit_num = int(bs.msl[0]['Slit_Number'])-1
if start_slit_num > 0:
y -= DY * start_slit_num
# if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
if bs.long_slit:
try:
y=longslit["yrange"][1]
except:
error("Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y))
print("Longslit reduction mode is specified, but the row position has not been specified. Defaulting to "+str(y))
# Count and check that the # of objects in the SSL matches that of the MSL
# This is purely a safety check
numslits = np.zeros(len(ssl))
for i in range(len(ssl)):
slit = ssl[i]
M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])
numslits[i] = len(M[0])
numslits = np.array(numslits)
info("Number of slits allocated for this longslit: "+str(np.sum(numslits)))
# now begin steps outline above
results = []
result = {}
result["Target_Name"] = ssl[0]["Target_Name"]
# 1 Defines a polynomial of degree 0, which is a constant, with the value of the top of the slit
result["top"] = np.poly1d([longslit["yrange"][1]])
    topfun = np.poly1d([longslit["yrange"][1]]) # this is a constant function with c=top of the slit
    botfun = np.poly1d([longslit["yrange"][0]]) # this is a constant function with c=bottom of the slit
# xposs_top_this = [10 110 210 .... 1810 1910]
xposs_top = np.arange(10,2000,100)
xposs_bot = np.arange(10,2000,100)
    # yposs_top_this = [1104 1104 ... 1104 1104], it's the constant polynomial evaluated at the X positions
yposs_top = topfun(xposs_top)
yposs_bot = botfun(xposs_bot)
''' Deal with the current slit '''
target=0
hpps = Wavelength.estimate_half_power_points(
bs.scislit_to_csuslit(target+1)[0], header, bs)
ok = np.where((xposs_top > hpps[0]) & (xposs_top < hpps[1]))
xposs_bot = xposs_bot[ok]
yposs_bot = yposs_bot[ok]
xposs_top = xposs_top[ok]
yposs_top = yposs_top[ok]
if len(xposs_bot) == 0:
error ("The slit edges specifications appear to be incorrect.")
raise Exception("The slit edges specifications appear to be incorrect.")
    # bot is the polynomial that defines the shape of the bottom of the slit. In this case, we set it to a constant.
bot = botfun.c.copy()
top = topfun.c.copy()
#4
result = {}
result["Target_Name"] = ssl[target]["Target_Name"]
result["xposs_top"] = xposs_top
result["yposs_top"] = yposs_top
result["xposs_bot"] = xposs_bot
result["yposs_bot"] = yposs_bot
result["top"] = np.poly1d(top)
result["bottom"] = np.poly1d(bot)
result["hpps"] = hpps
result["ok"] = ok
results.append(result)
results.append({"version": options["version"]})
return results
def find_and_fit_edges(data, header, bs, options,edgeThreshold=450):
'''
Given a flat field image, find_and_fit_edges determines the position
of all slits.
The function works by starting with a guess at the location for a slit
edge in the spatial direction(options["first-slit-edge"]).
Starting from the guess, find_edge_pair works out in either direction,
measuring the position of the (e.g.) bottom of slit 1 and top of slit 2:
------ pixel y value = 2048
Slit 1 data
------ (bottom)
deadband
------ (top)
Slit N pixel data ....
------- (bottom) pixel = 0
--------------------------------> Spectral direction
1. At the top of the flat, the slit edge is defined to be a pixel value
2. The code guesses the position of the bottom of the slit, and runs
find_edge_pair to measure slit edge locations.
3. A low-order polynomial is fit to the edge locations with
fit_edge_poly
4. The top and bottom of the current slit, is stored into the
result list.
5. The top of the next slit is stored temporarily for the next
iteration of the for loop.
6. At the bottom of the flat, the slit edge is defined to be pixel 4.
options:
options["edge-order"] -- The order of the polynomial [pixels] edge.
options["edge-fit-width"] -- The length [pixels] of the edge to
fit over
'''
# TODO: move hardcoded values into Options.py
# y is the location to start
y = 2034
DY = 44.25
toc = 0
ssl = bs.ssl
slits = []
top = [0., np.float(Options.npix)]
start_slit_num = int(bs.msl[0]['Slit_Number'])-1
if start_slit_num > 0:
y -= DY * start_slit_num
# Count and check that the # of objects in the SSL matches that of the MSL
# This is purely a safety check
numslits = np.zeros(len(ssl))
for i in range(len(ssl)):
slit = ssl[i]
M = np.where(slit["Target_Name"] == bs.msl["Target_in_Slit"])
numslits[i] = len(M[0])
numslits = np.array(numslits)
if (np.sum(numslits) != CSU.numslits) and (not bs.long_slit) and (not bs.long2pos_slit):
error ("The number of allocated CSU slits (%i) does not match "
" the number of possible slits (%i)." % (np.sum(numslits),
CSU.numslits))
raise Exception("The number of allocated CSU slits (%i) does not match "
" the number of possible slits (%i)." % (np.sum(numslits),
CSU.numslits))
# if the mask is a long slit, the default y value will be wrong. Set instead to be the middle
if bs.long_slit:
y = 1104
    # now begin the steps outlined above
results = []
result = {}
result["Target_Name"] = ssl[0]["Target_Name"]
# 1
result["top"] = np.poly1d([y])
''' Nomenclature here is confusing:
----- Edge -- Top of current slit, bottom of prev slit
. o ' Data
===== Data
.;.;' Data
----- Edge -- Bottom of current slit, top of next slit
'''
topfun = np.poly1d([y])
xposs_top_this = np.arange(10,2000,100)
yposs_top_this = topfun(xposs_top_this)
initial_edges = np.array([2034], dtype=np.int)
edge = 2034
# build an array of values containing the lower edge of the slits
for target in range(len(ssl)):
# target is the slit number
edge -= DY * numslits[target]
initial_edges=np.append(initial_edges,int(edge))
    # collapse the 2d flat along the wavelength axis to build a spatial profile of the slits
vertical_profile = np.mean(data, axis=1)
    # build an array containing the spatial positions of the slit centers, i.e. the midpoint
    # between the expected top and bottom pixel rows of each slit
spatial_centers = np.array([], dtype=np.int)
for k in np.arange(0,len(initial_edges)-1):
spatial_centers = np.append(spatial_centers,(initial_edges[k]+initial_edges[k+1])//2)
#slit_values=np.array([])
#for k in np.arange(0, len(spatial_centers)):
# slit_values = np.append(slit_values,np.mean(vertical_profile[spatial_centers[k]-3:spatial_centers[k]+3]))
for target in range(len(ssl)):
y -= DY * numslits[target]
y = max(y, 1)
# select a 6 pixel wide section of the vertical profile around the slit center
threshold_area = vertical_profile[spatial_centers[target]-3:spatial_centers[target]+3]
        # set the edge threshold to 80% of the mean ADU level in this region, used to define the slit edges
edgeThreshold = np.mean(threshold_area)*0.8
#if edgeThreshold > 450:
# edgeThreshold = 450
info("[%2.2i] Finding Slit Edges for %s ending at %4.0i. Slit "
"composed of %i CSU slits" % ( target,
ssl[target]["Target_Name"], y, numslits[target]))
info("[%2.2i] Threshold used is %.1f" % (target,edgeThreshold))
''' First deal with the current slit '''
hpps = Wavelength.estimate_half_power_points(
bs.scislit_to_csuslit(target+1)[0], header, bs)
if y == 1:
xposs_bot = [1024]
xposs_bot_missing = []
yposs_bot = [4.25]
botfun = np.poly1d(yposs_bot)
ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
else:
(xposs_top_next, xposs_top_next_missing, yposs_top_next, xposs_bot,
xposs_bot_missing, yposs_bot, scatter_bot_this) = find_edge_pair(
data, y, options["edge-fit-width"],edgeThreshold=edgeThreshold)
ok = np.where((xposs_bot > hpps[0]) & (xposs_bot < hpps[1]))
ok2 = np.where((xposs_bot_missing > hpps[0]) & (xposs_bot_missing <
hpps[1]))
xposs_bot = xposs_bot[ok]
xposs_bot_missing = xposs_bot_missing[ok2]
yposs_bot = yposs_bot[ok]
if len(xposs_bot) == 0:
botfun = np.poly1d(y-DY)
else:
(botfun, bot_res, botsd, botok) = fit_edge_poly(xposs_bot,
xposs_bot_missing, yposs_bot, options["edge-order"])
bot = botfun.c.copy()
top = topfun.c.copy()
#4
result = {}
result["Target_Name"] = ssl[target]["Target_Name"]
result["xposs_top"] = xposs_top_this
result["yposs_top"] = yposs_top_this
result["xposs_bot"] = xposs_bot
result["yposs_bot"] = yposs_bot
result["top"] = np.poly1d(top)
result["bottom"] = np.poly1d(bot)
result["hpps"] = hpps
result["ok"] = ok
results.append(result)
#5
if y == 1:
break
next = target + 2
if next > len(ssl): next = len(ssl)
hpps_next = Wavelength.estimate_half_power_points(
bs.scislit_to_csuslit(next)[0],
header, bs)
ok = np.where((xposs_top_next > hpps_next[0]) & (xposs_top_next <
hpps_next[1]))
ok2 = np.where((xposs_top_next_missing > hpps_next[0]) &
(xposs_top_next_missing < hpps_next[1]))
xposs_top_next = xposs_top_next[ok]
xposs_top_next_missing = xposs_top_next_missing[ok2]
yposs_top_next = yposs_top_next[ok]
if len(xposs_top_next) == 0:
topfun = np.poly1d(y)
else:
(topfun, topres, topsd, ok) = fit_edge_poly(xposs_top_next,
xposs_top_next_missing, yposs_top_next, options["edge-order"])
xposs_top_this = xposs_top_next
xposs_top_this_missing = xposs_top_next_missing
yposs_top_this = yposs_top_next
results.append({"version": options["version"]})
return results
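# Sketch of how the list returned by find_and_fit_edges is typically consumed
# (illustrative only; the final element carries the version tag rather than a
# slit).  Each slit entry holds "top" and "bottom" numpy polynomials giving
# the edge row as a function of spectral pixel.
def _example_slit_footprints(results, npix=2048):
    x = np.arange(npix)
    return [(r["Target_Name"], r["top"](x), r["bottom"](x))
            for r in results if "Target_Name" in r]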
class FitCheck(object):
flat = None
bs = None
edges = None
    cutout = None # Is at most 2 edges x 46 slits x 11 pix or 1012 pixels
def __init__(self, maskname, bandname, options, fig):
self.fig = fig
self.flat = IO.read_drpfits(maskname, "combflat_2d_%s.fits" % bandname,
options)
self.edges, meta = IO.load_edges(maskname, bandname, options)
self.edgeno=2
self.cid = self.fig.canvas.mpl_connect('key_press_event', self)
self.draw()
def draw(self):
print(self.edgeno)
pos = 0
dy = 8
edgeno = self.edgeno
edge = self.edges[edgeno]
edgeprev = self.edges[edgeno-1]
p = np.round(edge["top"](1024))
top = min(p+2*dy, 2048)
        bot = max(p-2*dy, 0)
self.cutout = self.flat[1][bot:top,:].copy()
pl.figure(1)
pl.clf()
start = 0
dy = 512
for i in range(2048//dy):
pl.subplot(2048//dy,1,i+1)
pl.xlim(start, start+dy)
if i == 0: pl.title("edge %i] %s|%s" % (edgeno,
edgeprev["Target_Name"], edge["Target_Name"]))
pl.subplots_adjust(left=.07,right=.99,bottom=.05,top=.95)
pl.imshow(self.flat[1][bot:top,start:start+dy], extent=(start,
start+dy, bot, top), cmap='Greys', vmin=2000, vmax=6000)
pix = np.arange(start, start+dy)
pl.plot(pix, edge["top"](pix), 'r', linewidth=1)
pl.plot(pix, edgeprev["bottom"](pix), 'r', linewidth=1)
pl.plot(edge["xposs_top"], edge["yposs_top"], 'o')
pl.plot(edgeprev["xposs_bot"], edgeprev["yposs_bot"], 'o')
hpp = edge["hpps"]
pl.axvline(hpp[0],ymax=.5, color='blue', linewidth=5)
pl.axvline(hpp[1],ymax=.5, color='red', linewidth=5)
hpp = edgeprev["hpps"]
pl.axvline(hpp[0],ymin=.5,color='blue', linewidth=5)
pl.axvline(hpp[1],ymin=.5,color='red', linewidth=5)
if False:
L = top-bot
Lx = len(edge["xposs"])
for i in range(Lx):
xp = edge["xposs"][i]
frac1 = (edge["top"](xp)-bot-1)//L
pl.axvline(xp,ymin=frac1)
for xp in edgeprev["xposs"]:
frac2 = (edgeprev["bottom"](xp)-bot)//L
pl.axvline(xp,ymax=frac2)
start += dy
def __call__(self, event):
kp = event.key
x = event.xdata
y = event.ydata
print(kp)
if kp == 'n':
self.edgeno += 1
if self.edgeno > len(self.edges):
self.edgeno = len(self.edges)
print("done")
else:
self.draw()
if kp == 'p':
self.edgeno -= 1
if self.edgeno < 2:
self.edgeno = 2
print("Beginning" )
else:
self.draw()
class TestFlatsFunctions(unittest.TestCase):
def setUp(self):
pass
def test_trace_edge(self):
(header, data1, targs, ssl, msl, asl) = \
IO.readfits_all("/users/npk/desktop/c9/m110326_3242.fits")
data = data1
ssl = ssl[ssl["Slit_Number"] != ' ']
numslits = np.round(np.array(ssl["Slit_length"],
dtype=np.float) / 7.02)
for i in range(len(ssl)):
print(ssl[i]["Target_Name"], numslits[i])
if __name__ == '__main__':
unittest.main()
| 35,490 | 33.125962 | 131 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Longslit.py |
# MOSFIRE Longslit Reductions
# 5 Aug 2012
# Nick Konidaris
import os
import pdb
import numpy as np
import scipy
from MOSFIRE import Detector, IO, Filters, Wavelength
def rectify(dname, lamdat, A, B, maskname, band, wavoptions,
longoptions):
header, data = IO.readfits(dname)
raw_img = data * Detector.gain / header['TRUITIME']
dlam = Wavelength.grating_results(band)
hpp = np.array(Filters.hpp[band])
ll_fid = np.arange(hpp[0], hpp[1], dlam)
rectified = np.zeros((2048, len(ll_fid)))
from scipy.interpolate import interp1d
for i in xrange(2048):
ll = lamdat[i,:]
ss = raw_img[i,:]
ok = np.isfinite(ll) & np.isfinite(ss) & (ll < hpp[1]) & (ll >
hpp[0])
if len(np.where(ok)[0]) < 30:
continue
f = interp1d(ll[ok], ss[ok], bounds_error=False)
rectified[i,:] = f(ll_fid)
header["wat0_001"] = "system=world"
header["wat1_001"] = "wtype=linear"
header["wat2_001"] = "wtype=linear"
header["dispaxis"] = 1
header["dclog1"] = "Transform"
header["dc-flag"] = 0
header["ctype1"] = "AWAV"
header["cunit1"] = "Angstrom"
header["crval1"] = ll_fid[0]
header["crval2"] = 0
header["crpix1"] = 1
header["crpix2"] = 1
header["cdelt1"] = 1
header["cdelt2"] = 1
header["cname1"] = "angstrom"
header["cname2"] = "pixel"
header["cd1_1"] = dlam
header["cd1_2"] = 0
header["cd2_1"] = 0
header["cd2_2"] = 1
header["object"] = "rectified [eps]"
IO.writefits(rectified, maskname, "rectified_%s" % (dname),
wavoptions, header=header, overwrite=True, lossy_compress=True)
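# Illustrative note: the rectified frame written above carries a simple linear
# wavelength WCS along the dispersion axis (crval1 = first fiducial wavelength,
# cd1_1 = dlam, crpix1 = 1), so the wavelength of 0-indexed column j is just
# crval1 + j * cd1_1.  The helper below is hypothetical and only documents
# that convention.
def _example_column_wavelength(header, j):
    return header["crval1"] + j * header["cd1_1"]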
def imdiff(A, B, maskname, band, header, options):
s = "[0]"
targname = A[1]["targname"].rstrip(" ")
if targname == "":
objname = A[1]["object"].replace(" ", "_")
else:
objname = targname.replace(" ", "_")
operand1 = A[0] + '[0]'
operand2 = B[0] + '[0]'
imnumA = A[0].split('_')[-1].rstrip(".fits")
imnumB = B[0].split('_')[-1].rstrip(".fits")
dname = "{0}_{1}_{2}_{3}-{4}_{5}-{6}.fits".format(maskname, objname, band,
A[1]["frameid"], B[1]["frameid"], imnumA, imnumB)
try: os.remove(dname)
except:pass
print("Data Diff {0}-{1}".format(operand1,operand2))
IO.imarith(operand1, '-', operand2, dname)
''' Now handle variance '''
numreads = header["READS0"]
RN_adu = Detector.RN / np.sqrt(numreads) / Detector.gain
varname = "var_{0}_{1}_{2}_{3}+{4}_{5}+{6}.fits".format(maskname, objname, band,
A[1]["frameid"], B[1]["frameid"], imnumA, imnumB)
print("Var Sum {0}+{1}".format(operand1,operand2))
IO.imarith(operand1, '+', operand2, "tmp_" + varname)
try: os.remove(varname)
except: pass
print("Var add RN {0}+{1}".format(operand1,RN_adu**2))
IO.imarith("tmp_" + varname, '+', RN_adu**2, varname)
try: os.remove("tmp_" + varname)
except: pass
return dname, varname
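# Sketch (illustrative only) of the read-noise term that imdiff adds to the
# difference-frame variance: the effective read noise per pixel in ADU for a
# frame averaged over `numreads` reads, using the detector constants imported
# at the top of this module.
def _example_read_noise_adu(numreads):
    return Detector.RN / np.sqrt(numreads) / Detector.gain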
def apply_flat(scifilename, maskname, band):
''' Divides the contents of scifilename by the flat field and
overwrites scifilename with the same file divided by the flat
Args:
scifilename: Path to science file name.
maskname: The mask name
band: The filter bands
Results:
Overwrites scifilename where the data contents of the file
are divided by the pixel flat
'''
pixelflat_file = "pixelflat_2d_{0}.fits".format(band)
    flat = IO.readfits(pixelflat_file, use_bpm=True)
flat_data = flat[1].filled(1.0)
header, data = IO.readfits(scifilename)
print("Applying flat to file {0}".format(scifilename))
IO.writefits(data/flat_data, maskname, scifilename, {}, header=header,
overwrite=True)
def go(maskname,
band,
filenames,
wavefile,
wavoptions,
longoptions,
use_flat=False):
'''
The go command is the main entry point into this module.
Inputs:
maskname: String of the mask name
band: String of 'Y', 'J', 'H', or 'K'
filenames: List of filenames to reduce
wavefile: String of path to FITS file with the wavelength solution
wavoptions: The Wavelength Options dictionary
longoptions: Dictionary containing:
{'yrange': The pixel range to extract over
'row_position': The row to solve the initial wavelength solution on}
use_flat: Boolean False [default] means to use no flat field
Boolean True means to divide by the pixelflat
'''
wavename = Wavelength.filelist_to_wavename(filenames, band, maskname,
wavoptions).rstrip(".fits")
print("Wavefile: {0}".format(wavefile))
lamhdr, lamdat = IO.readfits(wavefile)
positions = []
objname = None
for listfile in filenames:
fnames = IO.list_file_to_strings(listfile)
if len(fnames) != 1:
raise Exception("I currently expect only one file per position. Remove multiple entries and try again")
header, data, bs = IO.readmosfits(fnames[0], wavoptions)
if objname is None:
objname = header["object"]
if objname != header["object"]:
print ("Trying to combine longslit stack of object {0} "
"with object {1}".format(objname, header["object"]))
print("{0:18s} {1:30s} {2:2s} {3:4.1f}".format(file, header["object"],
header["frameid"], header["yoffset"]))
positions.append([fnames[0], header, data, bs])
print("{0:2g} nod positions found. Producing stacked difference" \
" image.".format(len(positions)))
for i in xrange(len(positions)-1):
A = positions[i]
B = positions[i+1]
print("----------- -----".format(A,B))
dname, varname = imdiff(A, B, maskname, band, header, wavoptions)
if use_flat:
apply_flat(dname, maskname, band)
apply_flat(varname, maskname, band)
rectify(dname, lamdat, A, B, maskname, band, wavoptions,
longoptions)
rectify(varname, lamdat, A, B, maskname, band, wavoptions,
longoptions)
        print(dname)
dname, vname = imdiff(B, A, maskname, band, header, wavoptions)
if use_flat:
apply_flat(dname, maskname, band)
apply_flat(vname, maskname, band)
rectify(dname, lamdat, B, A, maskname, band, wavoptions,
longoptions)
rectify(vname, lamdat, B, A, maskname, band, wavoptions,
longoptions)
if False:
fname = os.path.join(path, wavename + ".fits")
B = IO.readfits(fname)
B = [fname, B[0], B[1]]
for i in xrange(len(positions)):
A = positions[i]
imdiff(A, B, maskname, band, wavoptions)
rectify(path, dname, lamdat, A, B, maskname, band, wavoptions,
longoptions)
imdiff(B, A, maskname, band, wavoptions)
rectify(path, dname, lamdat, B, A, maskname, band, wavoptions,
longoptions)
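# Example invocation of go() (the mask name, file names and the longslit row
# range below are hypothetical and shown purely for illustration):
#
#   from MOSFIRE import Longslit, Options
#   Longslit.go('LONGSLIT-3x0.7', 'K',
#               ['Offset_5.txt', 'Offset_-5.txt'],
#               'lambda_solution_wave_stack_K.fits',
#               Options.wavelength,
#               {'yrange': [745, 1100], 'row_position': 930},
#               use_flat=True)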
| 7,104 | 30.026201 | 115 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/__init__.py | '''
MOSFIRE Data Reduction Pipeline and instrument control software
'''
''' Version number must be a float'''
__version__ = 0.11
| 131 | 15.5 | 63 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/numerixenv.py | from __future__ import division # confidence high
import os
def check_input(xxx):
"""Check if input is a Numarray Array."""
try:
import numarray
return isinstance(xxx,numarray.numarraycore.NumArray)
except ImportError:
pass
def check():
"""Check for running numarray version of pyfits with numpy code."""
try:
import pyfits
if pyfits.__version__ < '1.1':
raise EnvironmentError("Pyfits 1.1 or later required, pyfits version %s detected\n" % pyfits.__version__)
except ImportError:
pass
try:
if os.environ['NUMERIX'] == 'numarray':
raise EnvironmentError("NUMERIX/numarray environment detected; numpy environment required")
except KeyError:
pass
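# Typical use (illustrative): run the environment check once at import time so
# that a numarray-based configuration fails fast.
#
#   import numerixenv
#   numerixenv.check()   # raises EnvironmentError if NUMERIX=numarray is set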
| 778 | 26.821429 | 117 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Extract.py | #!/usr/env/python
## Import General Tools
import os
import sys
import textwrap
import re
import numpy as np
from astropy.io import fits
from astropy import units as u
from astropy import table
from astropy import wcs
from astropy.modeling import models, fitting
import scipy.signal as signal
from matplotlib import pyplot as pl
pl.rcParams['keymap.fullscreen'] = ''
pl.rcParams['keymap.grid'] = ''
pl.rcParams['keymap.save'] = ''
from MOSFIRE import Detector
from MOSFIRE.MosfireDrpLog import info, debug, warning, error
RN = Detector.RN * u.electron
Gain = Detector.gain * u.electron/u.adu
py3 = sys.version_info[0] > 2 #creates boolean value for test that Python > 2
##-------------------------------------------------------------------------
## Find Stellar Traces
##-------------------------------------------------------------------------
def print_instructions():
'''Print instructions for interactive fit to screen.
'''
text = [
('The figure shows the raw spectrum collapsed in the wavelength'
' direction as black points. If there are one or more object traces '
'with reasonable '
'signal to noise, you should see one or more signals (e.g. a positive '
'gaussian profile).'
),
('You can use this tool to add, modify, and delete apertures. '
'The apertures are indicated by a yellow shaded region and their center '
'position and half width in pixels is annotated near the top of each '
'shaded region. There may be automatically generated regions already '
'shown if that option was selected when the software was run.'
),
('The apertures define the pixels which will be used as input to the '
         'optimal spectral extraction (Horne 1986) algorithm. Having a wide '
         'aperture should not add additional noise, as the pixel weighting is '
         'optimized during the spectral extraction step. The apertures are shown '
'here in order for the user to verify 1) that there is no overlap between '
'adjacent objects, 2) that the apertures are wide enough to reasonably '
'encompass all flux from the object, and 3) that all objects have '
'properly defined apertures.'
),
('To delete an existing aperture: place the mouse near the center of the '
'aperture and press the "d" key.'
),
('To add an aperture by fitting a gaussian to the profile: place the mouse '
'near the peak of the profile and press the "g" key. The half width of '
'the aperture will be set at 5 times the sigma of the fitted gaussian. '
         'If a gaussian fit has been used to define an aperture, the fit will be '
'shown as a blue line.'
),
('To add an aperture manually: place the mouse in the X position where the '
'new aperture should be centered and press the "a" key. Then type the half'
' width (in pixels) for that aperture in response to the query in the '
'terminal.'
),
('To modify the half width of an existing aperture: place the mouse near '
'the center of the aperture and press the "w" key. Then type the half'
' width (in pixels) for that aperture in response to the query in the '
'terminal.'
),
('To modify the center position of an existing aperture: place the mouse '
'near the center of the aperture and press the "p" key. Then type the '
'position (in pixels) for that aperture in response to the query in the '
'terminal.'
),
('When you are done adding or removing apertures, close the interactive '
'plot window by clicking the close button in the upper right corner '
'(or by whatever method is typical for your OS or windowing system) '
'or press the "q" or "n" keys (for "quit" or "next" respectively).'
),
]
print('#'*80)
print(' Aperture Definition Tool Instructions')
for paragraph in text:
print()
print(textwrap.fill(textwrap.dedent(paragraph).strip('\n'), width=80))
print('#'*80)
print()
class ApertureEditor(object):
def __init__(self, hdu, title=None, interactive=False):
self.header = hdu.header
self.data = np.ma.MaskedArray(data=hdu.data, mask=np.isnan(hdu.data))
self.ydata = np.mean(self.data, axis=1)
self.xdata = list(range(0,len(self.ydata), 1))
self.interactive = interactive
if self.interactive:
self.fig = pl.figure()
self.ax = self.fig.gca()
self.title = title
if title:
info('Editing apertures for {}'.format(title))
else:
info('Editing apertures')
self.apertures = table.Table(names=('id', 'position', 'width',\
'amplitude', 'sigma'),\
dtype=('i4', 'f4', 'f4', 'f4', 'f4'))
def add_aperture(self, pos, width):
info(' Adding aperture at position {} of width {}'.format(pos, width))
id = len(self.apertures)
data = {'id': id,
'position': pos,
'width': width,
'amplitude': None,
'sigma': None,
}
self.apertures.add_row(data)
self.plot_apertures()
def delete_aperture(self, id):
pos = self.apertures[id]['position']
info(' Removing aperture at position {:.0f}'.format(pos))
self.apertures.remove_row(id)
self.plot_apertures()
    def set_position(self, id=None, pos=None):
        assert id is not None
        if pos is None:
            if py3:
                response = input("Please enter new position in pixels: ")
            else:
                response = raw_input("Please enter new position in pixels: ")
            pos = int(response)
        info('  Moving aperture {} from {:.0f} to {:.0f}'.format(id,
             float(self.apertures[id]['position']), float(pos)))
        self.apertures[id]['position'] = pos
        self.plot_apertures()
def set_width(self, id=None, width=None):
assert id is not None
info(' Changing width for aperture {} at position {:.0f}'.format(id,
self.apertures[id]['position']))
if width:
self.apertures[id]['width'] = width
else:
if py3:
response = input("Please enter new half width in pixels: ")
else:
response = raw_input("Please enter new half width in pixels: ")
width = int(response)
self.apertures[id]['width'] = width
self.plot_apertures()
def guess(self):
## Try to guess at aperture positions if no information given
## Start with maximum pixel value
valid_profile = list(self.ydata[~self.ydata.mask])
maxval = max(valid_profile)
maxind = valid_profile.index(maxval)
info(' Guessing at aperture near position {}'.format(maxind))
return (maxind, maxval)
# self.fit_trace(maxind, maxval)
def fit_trace(self, pos, amp):
info(' Adding fitted aperture near position {:.0f}'.format(pos))
id = len(self.apertures)
g0 = models.Gaussian1D(mean=pos, amplitude=amp,
bounds={'amplitude': [0, float('+Inf')]})
fitter = fitting.LevMarLSQFitter()
g = fitter(g0, self.xdata, self.ydata)
# Set maximum width of aperture to the YOFFSET parameter
try:
maxap = np.floor(float(self.header['YOFFSET'])/0.1799)
except:
maxap = self.data.shape[0]
# Set minimum width of aperture to an estimate of 3x the seeing
seeing = 0.5 # arcsec
minap = np.ceil(3*seeing/0.1799)
width = np.ceil(5.*g.param_sets[2][0])
width = max(min(maxap, width), minap)
data = {'id': id,
'position': g.param_sets[1][0],
'width': width,
'amplitude': g.param_sets[0][0],
'sigma': g.param_sets[2][0],
}
self.apertures.add_row(data)
self.plot_apertures()
def plot_data(self):
'''Plot the raw data without apertures.
'''
if self.interactive:
debug(' Plotting profile data')
pl.plot(self.xdata, self.ydata, 'k-',
label='Spatial Profile', drawstyle='steps-mid')
pl.xlim(min(self.xdata), max(self.xdata))
yspan = self.ydata.max() - self.ydata.min()
pl.ylim(self.ydata.min()-0.02*yspan, self.ydata.max()+0.18*yspan)
pl.xlabel('Pixel Position')
pl.ylabel('Flux (e-/sec)')
if self.title is None:
pl.title('Spatial Profile')
else:
pl.title(self.title)
def plot_apertures(self):
if self.interactive:
pl.cla()
self.plot_data()
yspan = self.ydata.max() - self.ydata.min()
if len(self.apertures) == 0:
pl.draw()
for ap in self.apertures:
debug(' Plotting aperture at position {}'.format(ap['position']))
if ap['sigma'] is not None and ap['amplitude'] is not None:
g = models.Gaussian1D(mean=ap['position'],
amplitude=ap['amplitude'],
stddev=ap['sigma'])
fit = [g(x) for x in self.xdata]
pl.plot(self.xdata, fit, 'b-', label='Fit', alpha=0.5)
shadeymin = np.floor(self.ydata.min())
shadeymax = np.ceil(self.ydata.max())
pl.axvspan(ap['position']-ap['width'],
ap['position']+ap['width'],
ymin=shadeymin,
ymax=shadeymax,
facecolor='y', alpha=0.3,
)
pl.text(ap['position']-ap['width']+1,
self.ydata.max() + 0.05*yspan,
'position={:.0f}\nwidth={:.0f}'.format(ap['position'],
ap['width']),
)
pl.draw()
pl.show()
def connect(self):
'''Connect keypresses to matplotlib for interactivity.
'''
self.cid_key = self.fig.canvas.mpl_connect('key_press_event',
self.keypress)
def disconnect(self):
self.fig.canvas.mpl_disconnect(self.cid_key)
def determine_id(self, event):
x = event.xdata
closest = None
for i,ap in enumerate(self.apertures):
id = ap['id']
d = abs(ap['position'] - x)
if closest is None:
closest = (id, d)
elif d < closest[1]:
closest = (id, d)
        debug('  ID of aperture nearest to keypress is {}'.format(closest[0]))
return closest[0]
def keypress(self, event):
'''Based on which key is presses on a key press event, call the
appropriate method.
'''
if event.key == 'a':
if py3:
response = input("Please enter new half width in pixels: ")
else:
response = raw_input("Please enter new half width in pixels: ")
width = int(response)
self.add_aperture(event.xdata, width)
print('Adding aperture at position {}, width {}'.format(event.xdata,
width))
elif event.key == 'w':
id = self.determine_id(event)
self.set_width(id=id)
elif event.key == 'p':
id = self.determine_id(event)
self.set_position(id=id, pos=event.xdata)
elif event.key == 'g':
self.fit_trace(event.xdata, event.ydata/abs(event.ydata))
elif event.key == 'd':
id = self.determine_id(event)
self.delete_aperture(id)
elif event.key == 'n':
self.quit(event)
elif event.key == 'q':
self.quit(event)
elif event.key == 's':
print(self.apertures)
elif event.key == 'r':
info(' Plotting apertures')
self.plot_apertures()
def savefig(self, plotfile):
'''Save the figure to a png file.
'''
self.fig.savefig(plotfile, bbox_inches='tight')
def quit(self, event):
info(' Done with aperture edits for this slit')
self.disconnect()
pl.close(self.fig)
##-------------------------------------------------------------------------
## Find Stellar Traces
##-------------------------------------------------------------------------
def find_apertures(hdu, guesses=[], width=10, title=None, interactive=True,
maskname=''):
'''Finds targets in spectra by simply collapsing the 2D spectra in the
wavelength direction and fitting Gaussian profiles to the positional profile
'''
pl.ioff()
ap = ApertureEditor(hdu, title=title, interactive=interactive)
if interactive:
ap.connect()
if re.search('LONGSLIT', maskname):
guesses = [int(np.argmax(ap.ydata))]
if (guesses == []) or (guesses is None):
# Guess at object position where specified in header by CRVAL2
pos = int(-hdu.header['CRVAL2'])
amp = ap.ydata[int(pos)]
# Estimate signal to noise of profile at that spot
# First, clip top and bottom 10 percent of pixels to roughly remove
# contribution by a single bright source.
pct = 10
pctile = (np.percentile(ap.ydata, pct), np.percentile(ap.ydata, 100-pct))
w = np.where((ap.ydata > pctile[0]) & (ap.ydata < pctile[1]))
# Use the unclipped pixels to estimate signal to noise.
std = np.std(ap.ydata[w])
snr = amp/std
# If SNR is strong, fit a gaussian, if not, just blindly add an aperture
if snr > 5:
info(' Using trace to determine extraction aperture')
ap.fit_trace(pos, amp)
else:
info(' Could not find strong trace, adding aperture blindly')
ap.add_aperture(pos, width)
else:
for guess in guesses:
ap.fit_trace(guess, ap.ydata[guess])
return ap.apertures
##-------------------------------------------------------------------------
## Standard Spectral Extraction
##-------------------------------------------------------------------------
def standard_extraction(data, variance):
spect_1D = np.sum(data, axis=0)
variance_1D = np.sum(variance, axis=0)
return spect_1D, variance_1D
##-------------------------------------------------------------------------
## Iterate Spatial Profile
##-------------------------------------------------------------------------
def iterate_spatial_profile(P, DmS, V, f,\
smoothing=5, order=3, minpixels=50,\
sigma=4, nclip=2, verbose=True):
poly0 = models.Polynomial1D(degree=order)
fit_poly = fitting.LinearLSQFitter()
Pnew = np.zeros(P.shape)
for i,row in enumerate(P):
weights = f**2/V[i]
weights.mask = weights.mask | np.isnan(weights)
srow = np.ma.MaskedArray(data=signal.medfilt(row, smoothing),\
mask=(np.isnan(signal.medfilt(row, smoothing)) | weights.mask))
xcoord = np.ma.MaskedArray(data=np.arange(0,len(row),1),\
mask=srow.mask)
for iter in range(nclip+1):
nrej_before = np.sum(srow.mask)
fitted_poly = fit_poly(poly0,\
xcoord[~xcoord.mask], srow[~srow.mask],\
weights=weights[~weights.mask])
fit = np.array([fitted_poly(x) for x in xcoord])
resid = (DmS[i]-f*srow)**2 / V[i]
newmask = (resid > sigma)
weights.mask = weights.mask | newmask
srow.mask = srow.mask | newmask
xcoord.mask = xcoord.mask | newmask
nrej_after = np.sum(srow.mask)
            if nrej_after > nrej_before:
                if verbose:
                    info('Row {:3d}: Rejected {:d} pixels on clipping '
                         'iteration {:d}'.format(
                         i, nrej_after-nrej_before, iter))
        ## Reject row if too few pixels are available for the fit
        if (srow.shape[0] - nrej_after) < minpixels:
            if verbose:
                warning('Row {:3d}: WARNING! Only {:d} pixels remain after '
                        'clipping and masking'.format(
                        i, srow.shape[0] - nrej_after))
fit = np.zeros(fit.shape)
## Set negative values to zero
if np.sum((fit<0)) > 0 and verbose:
info('Row {:3d}: Reset {:d} negative pixels in fit to 0'.format(\
i, np.sum((fit<0))))
fit[(fit < 0)] = 0
Pnew[i] = fit
return Pnew
##-------------------------------------------------------------------------
## Optimal Spectral Extraction
##-------------------------------------------------------------------------
def optimal_extraction(image, variance_image, aperture_table,
fitsfileout=None,
plotfileout=None,
plot=None):
'''Given a 2D spectrum image, a 2D variance image, and a table of apertures
(e.g. as output by find_apertures() above), this function will optimally
extract a 1D spectrum for each entry in the table of apertures.
'''
if type(image) == fits.HDUList:
hdu = image[0]
elif type(image) == fits.PrimaryHDU:
hdu = image
else:
        error('Input to optimal_extraction should be an HDUList or an HDU')
raise TypeError
if type(variance_image) == fits.HDUList:
vhdu = variance_image[0]
    elif type(variance_image) == fits.PrimaryHDU:
        vhdu = variance_image
    else:
        error('Input to optimal_extraction should be an HDUList or an HDU')
raise TypeError
spectra2D = hdu.data
variance2D = vhdu.data
header = hdu.header
worig = wcs.WCS(hdu.header)
## State assumptions
assert header['DISPAXIS'] == 1
assert worig.to_header()['CTYPE1'] == 'AWAV'
assert header['CD1_2'] == 0
assert header['CD2_1'] == 0
assert worig.dropaxis(1).to_header()['CTYPE1'] == 'AWAV'
## Replace old WCS in header with the collapsed WCS
w = worig.dropaxis(1)
for key in list(worig.to_header().keys()):
if key in list(header.keys()):
header.remove(key)
for key in list(w.to_header().keys()):
if key in ['PC1_1', 'CRVAL1']:
header.set(key, w.to_header()[key]*1e10,
w.to_header().comments[key])
elif key in ['CUNIT1', 'CNAME1']:
header.set(key, 'Angstrom', w.to_header().comments[key])
else:
header.set(key, w.to_header()[key],
w.to_header().comments[key])
spectra = []
variances = []
for i,row in enumerate(aperture_table):
pos = row['position']
width = int(row['width'])
info('Extracting aperture {:d} at position {:.0f}'.format(i, pos))
ymin = max([int(np.floor(pos-width)), 0])
ymax = min([int(np.ceil(pos+width)), spectra2D.shape[0]])
DmS = np.ma.MaskedArray(data=spectra2D[ymin:ymax,:],\
mask=np.isnan(spectra2D[ymin:ymax,:]))
V = np.ma.MaskedArray(data=variance2D[ymin:ymax,:],\
mask=np.isnan(spectra2D[ymin:ymax,:]))
info(' Performing standard extraction')
f_std, V_std = standard_extraction(DmS, V)
info(' Forming initial spatial profile')
P_init_data = np.array([row/f_std for row in DmS])
P_init = np.ma.MaskedArray(data=P_init_data,\
mask=np.isnan(P_init_data))
info(' Fitting spatial profile')
P = iterate_spatial_profile(P_init, DmS, V, f_std, verbose=False)
info(' Calculating optimally extracted spectrum')
f_new_denom = np.ma.MaskedArray(data=np.sum(P**2/V, axis=0),\
mask=(np.sum(P**2/V, axis=0)==0))
f_opt = np.sum(P*DmS/V, axis=0)/f_new_denom
var_fopt = np.sum(P, axis=0)/f_new_denom
sig_fopt = np.sqrt(var_fopt)
typical_sigma = np.mean(sig_fopt[~np.isnan(sig_fopt)])
spectra.append(f_opt)
variances.append(var_fopt)
info(' Typical level = {:.1f}'.format(np.mean(f_opt)))
info(' Typical sigma = {:.1f}'.format(typical_sigma))
mask = np.isnan(np.array(spectra)) | np.isnan(np.array(variances))
spectra = np.ma.MaskedArray(data=np.array(spectra), mask=mask)
variances = np.ma.MaskedArray(data=np.array(variances), mask=mask)
for i,row in enumerate(aperture_table):
hdulist = fits.HDUList([])
if plotfileout:
fig = pl.figure(figsize=(16,6))
wunit = getattr(u, w.to_header()['CUNIT1'])
sp = spectra[i]
hdulist.append(fits.PrimaryHDU(data=sp.filled(0), header=header))
hdulist[0].header['APPOS'] = row['position']
if plotfileout:
sigma = np.sqrt(variances[i])
pix = np.arange(0,sp.shape[0],1)
wavelengths = w.wcs_pix2world(pix,1)[0]*wunit.to(u.micron)*u.micron
fillmin = sp-sigma
fillmax = sp+sigma
mask = np.isnan(fillmin) | np.isnan(fillmax) | np.isnan(sp)
pl.fill_between(wavelengths[~mask], fillmin[~mask], fillmax[~mask],\
label='uncertainty',\
facecolor='black', alpha=0.2,\
linewidth=0,\
interpolate=True)
pl.plot(wavelengths, sp, 'k-',
label='Spectrum for Aperture {} at {:.0f}'.format(i,
row['position']))
pl.xlabel('Wavelength (microns)')
pl.ylabel('Flux (e-/sec)')
pl.xlim(wavelengths.value.min(),wavelengths.value.max())
pl.ylim(0,1.05*sp.max())
pl.legend(loc='best')
bn, ext = os.path.splitext(plotfileout)
plotfilename = '{}_{:02d}{}'.format(bn, i, ext)
pl.savefig(plotfilename, bbox_inches='tight')
pl.close(fig)
var = variances[i]
hdulist.append(fits.ImageHDU(data=var.filled(0), header=header))
hdulist[1].header['APPOS'] = row['position']
hdulist[1].header['COMMENT'] = 'VARIANCE DATA'
if fitsfileout:
bn, ext = os.path.splitext(fitsfileout)
fitsfilename = '{}_{:02d}{}'.format(bn, i, ext)
hdulist.writeto(fitsfilename, overwrite=True)
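# Minimal sketch of the per-column optimal weighting (Horne 1986) applied in
# optimal_extraction above, for a single wavelength column: P is the spatial
# profile, D the sky-subtracted data and V the variance, all 1-D along the
# spatial direction.  Illustrative only; not called by the pipeline.
def _example_horne_column(P, D, V):
    denom = np.sum(P**2 / V)
    flux = np.sum(P * D / V) / denom
    variance = np.sum(P) / denom
    return flux, variance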
##-------------------------------------------------------------------------
## Extract Spectra Function
##-------------------------------------------------------------------------
def extract_spectra(maskname, band, interactive=True, width=10,
target='default'):
if target == 'default':
## Get objectnames from slit edges
edges = np.load('slit-edges_{}.npy'.format(band))
objectnames = [edge['Target_Name'] for edge in edges[:-1]]
eps_files = ['{}_{}_{}_eps.fits'.format(maskname, band, objectname)
for objectname in objectnames]
sig_files = ['{}_{}_{}_sig.fits'.format(maskname, band, objectname)
for objectname in objectnames]
spectrum_plot_files = ['{}_{}_{}.png'.format(maskname, band, objectname)
for objectname in objectnames]
fits_files = ['{}_{}_{}_1D.fits'.format(maskname, band, objectname)
for objectname in objectnames]
else:
objectnames = [target]
eps_files = ['{}_{}_eps.fits'.format(target, band)]
sig_files = ['{}_{}_sig.fits'.format(target, band)]
spectrum_plot_files = ['{}_{}.png'.format(target, band)]
fits_files = ['{}_{}_1D.fits'.format(target, band)]
aperture_tables = {}
if interactive:
print_instructions()
# First, iterate through all slits and define the apertures to extract
for i,eps_file in enumerate(eps_files):
objectname = objectnames[i]
eps = fits.open(eps_file, 'readonly')[0]
aperture_tables[objectname] = find_apertures(eps, width=width, title=objectname,
interactive=interactive,
maskname=maskname,
)
# Second, iterate through all slits again and perform spectral extraction
# using the apertures defined above
for i,eps_file in enumerate(eps_files):
objectname = objectnames[i]
sig_file = sig_files[i]
spectrum_plot_file = spectrum_plot_files[i]
fits_file = fits_files[i]
if len(aperture_tables[objectname]) == 0:
info('No apertures defined for {}. Skipping extraction.'.format(
objectname))
else:
info('Extracting spectra for {}'.format(objectname))
eps = fits.open(eps_file, 'readonly')[0]
sig = fits.open(sig_file, 'readonly')[0]
try:
optimal_extraction(eps, sig, aperture_tables[objectname],
fitsfileout=fits_file,
plotfileout=spectrum_plot_file,
)
except Exception as e:
warning('Failed to extract spectra for {}'.format(objectname))
warning(e)
#eps.close()
#sig.close()
if __name__ == "__main__":
cwd = os.path.abspath('.')
upone, band = os.path.split(cwd)
uptwo, date = os.path.split(upone)
upthree, maskname = os.path.split(uptwo)
extract_spectra(maskname, band, interactive=True)
| 26,176 | 39.334361 | 88 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/Rectify.py |
import os
import sys
import time
import numpy as np
from matplotlib import pyplot as pl
try:
from astropy.io import fits as pf
except:
import pyfits as pf
from multiprocessing import Pool
import scipy as sp
import scipy.ndimage
from scipy import interpolate as II
import warnings
import pdb
import MOSFIRE
from MOSFIRE import Background, CSU, Fit, IO, Options, Filters, Detector, Wavelength
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
def handle_rectification(maskname, in_files, wavename, band_pass, files, options,
commissioning_shift=3.0, target='default', plan=None):
'''Handle slit rectification and coaddition.
Args:
maskname: The mask name string
in_files: List of stacked spectra in electron per second. Will look
like ['electrons_Offset_1.5.txt.fits', 'electrons_Offset_-1.5.txt.fits']
wavename: path (relative or full) to the wavelength stack file, string
band_pass: Band pass name, string
        files: Path to a MOSFIRE FITS file (or list of files) containing the
            full set of FITS extensions for the barset. It can be any file in
            the list of science files.
Returns:
None
Writes files:
[maskname]_[band]_[object name]_eps.fits --
The rectified, background subtracted, stacked eps spectrum
[maskname]_[band]_[object name]_sig.fits --
Rectified, background subtracted, stacked weight spectrum (STD/itime)
[maskname]_[band]_[object_name]_itime.fits
Rectified, CRR stacked integration time spectrum
[maskname]_[band]_[object_name]_snrs.fits
Rectified signal to noise spectrum
'''
global edges, dats, vars, itimes, shifts, lambdas, band, fidl, all_shifts
band = band_pass
dlambda = Wavelength.grating_results(band)
hpp = Filters.hpp[band]
fidl = np.arange(hpp[0], hpp[1], dlambda)
lambdas = IO.readfits(wavename, options)
if np.any(lambdas[1].data < 0) or np.any(lambdas[1].data > 29000):
info("***********WARNING ***********")
info("The file {0} may not be a wavelength file.".format(wavename))
info("Check before proceeding.")
info("***********WARNING ***********")
edges, meta = IO.load_edges(maskname, band, options)
shifts = []
posnames = []
postoshift = {}
for file in in_files:
info(":: "+str(file))
II = IO.read_drpfits(maskname, file, options)
off = np.array((II[0]["decoff"], II[0]["raoff"]),dtype=np.float64)
if "yoffset" in II[0]:
off = -II[0]["yoffset"]
else:
# Deal with data taken during commissioning
if II[0]["frameid"] == 'A': off = 0.0
else: off = commissioning_shift
try: off0
except: off0 = off
shift = off - off0
shifts.append(shift)
posnames.append(II[0]["frameid"])
postoshift[II[0]['frameid']] = shift
info("Position {0} shift: {1:2.2f} as".format(off, shift))
# this is to deal with cases in which we want to rectify one single file
    if len(set(posnames)) == 1:
plans = [['A']]
else:
if plan is None:
plans = Background.guess_plan_from_positions(set(posnames))
else:
plans = plan
all_shifts = []
for myplan in plans:
to_append = []
for pos in myplan:
to_append.append(postoshift[pos])
all_shifts.append(to_append)
# Reverse the elements in all_shifts to deal with an inversion
all_shifts.reverse()
theBPM = IO.badpixelmask()
all_solutions = []
cntr = 0
    if target == 'default':
outname = maskname
else:
outname = target
for plan in plans:
        if len(plan) == 1:
p0 = 'A'
p1 = 'B'
else:
p0 = plan[0].replace("'", "p")
p1 = plan[1].replace("'", "p")
suffix = "%s-%s" % (p0,p1)
info("Handling plan %s" % suffix)
fname = "bsub_{0}_{1}_{2}.fits".format(outname,band,suffix)
EPS = IO.read_drpfits(maskname, fname, options)
EPS[1] = np.ma.masked_array(EPS[1], theBPM, fill_value=0)
fname = "var_{0}_{1}_{2}.fits".format(outname, band, suffix)
VAR = IO.read_drpfits(maskname, fname, options)
VAR[1] = np.ma.masked_array(VAR[1], theBPM, fill_value=np.inf)
fname = "itime_{0}_{1}_{2}.fits".format(outname, band, suffix)
ITIME = IO.read_drpfits(maskname, fname, options)
ITIME[1] = np.ma.masked_array(ITIME[1], theBPM, fill_value=0)
dats = EPS
vars = VAR
itimes = ITIME
EPS[0]["ORIGFILE"] = fname
tock = time.time()
sols = list(range(len(edges)-1,-1,-1))
shifts = all_shifts[cntr]
cntr += 1
p = Pool()
solutions = p.map(handle_rectification_helper, sols)
p.close()
all_solutions.append(solutions)
tick = time.time()
info("-----> Mask took %i. Writing to disk." % (tick-tock))
output = np.zeros((1, len(fidl)))
snrs = np.zeros((1, len(fidl)))
sdout= np.zeros((1, len(fidl)))
itout= np.zeros((1, len(fidl)))
# the barset [bs] is used for determining object position
files = IO.list_file_to_strings(files)
info("Using "+str(files[0])+" for slit configuration.")
x, x, bs = IO.readmosfits(files[0], options)
for i_slit in range(len(solutions)):
solution = all_solutions[0][i_slit]
header = EPS[0].copy()
obj = header['OBJECT']
#Again some weirdness with Longslit target names
try:
target_name = str(bs.ssl[-(i_slit+1)]['Target_Name'], 'utf-8')
except TypeError:
target_name = bs.ssl[-(i_slit+1)]['Target_Name']
header['OBJECT'] = target_name
pixel_dist = np.float(bs.ssl[-(i_slit+1)]['Target_to_center_of_slit_distance'])/0.18
pixel_dist -= solution['offset']
ll = solution["lambda"]
header["wat0_001"] = "system=world"
header["wat1_001"] = "wtype=linear"
header["wat2_001"] = "wtype=linear"
header["dispaxis"] = 1
header["dclog1"] = "Transform"
header["dc-flag"] = 0
header["ctype1"] = "AWAV"
header["cunit1"] = "Angstrom"
header["crval1"] = ll[0]
header["crval2"] = -solution["eps_img"].shape[0]//2 - pixel_dist
header["crpix1"] = 1
header["crpix2"] = 1
header["cdelt1"] = ll[1]-ll[0]
header["cdelt2"] = 1
header["cname1"] = "angstrom"
header["cname2"] = "pixel"
header["cd1_1"] = ll[1]-ll[0]
header["cd1_2"] = 0
header["cd2_1"] = 0
header["cd2_2"] = 1
S = output.shape
img = solution["eps_img"]
std = solution["sd_img"]
tms = solution["itime_img"]
for i_solution in range(1,len(all_solutions)):
info("Combining solution %i" %i_solution)
solution = all_solutions[i_solution][i_slit]
img += solution["eps_img"]
std += solution["sd_img"]
tms += solution["itime_img"]
output = np.append(output, img, 0)
output = np.append(output, np.nan*np.zeros((3,S[1])), 0)
snrs = np.append(snrs, img*tms/std, 0)
snrs = np.append(snrs, np.nan*np.zeros((3,S[1])), 0)
sdout = np.append(sdout, std, 0)
sdout = np.append(sdout, np.nan*np.zeros((3,S[1])), 0)
itout = np.append(itout, tms, 0)
itout = np.append(itout, np.nan*np.zeros((3,S[1])), 0)
header['bunit'] = ('electron/second', 'electron power')
IO.writefits(img, maskname,
"{0}_{1}_{2}_eps.fits".format(outname, band, target_name), options,
overwrite=True, header=header, lossy_compress=False)
header['bunit'] = ('electron/second', 'sigma/itime')
IO.writefits(std/tms, maskname,
"{0}_{1}_{2}_sig.fits".format(outname, band, target_name), options,
overwrite=True, header=header, lossy_compress=False)
header['bunit'] = ('second', 'exposure time')
IO.writefits(tms, maskname,
"{0}_{1}_{2}_itime.fits".format(outname, band, target_name), options,
overwrite=True, header=header, lossy_compress=False)
header['bunit'] = ('', 'SNR')
IO.writefits(img*tms/std, maskname,
"{0}_{1}_{2}_snrs.fits".format(outname, band, target_name), options,
overwrite=True, header=header, lossy_compress=False)
header = EPS[0].copy()
header["wat0_001"] = "system=world"
header["wat1_001"] = "wtype=linear"
header["wat2_001"] = "wtype=linear"
header["dispaxis"] = 1
header["dclog1"] = "Transform"
header["dc-flag"] = 0
header["ctype1"] = "AWAV"
header["cunit1"] = ("Angstrom", 'Start wavelength')
header["crval1"] = ll[0]
header["crval2"] = 1
header["crpix1"] = 1
header["crpix2"] = 1
header["cdelt1"] = (ll[1]-ll[0], 'Angstrom/pixel')
header["cdelt2"] = 1
header["cname1"] = "angstrom"
header["cname2"] = "pixel"
header["cd1_1"] = (ll[1]-ll[0], 'Angstrom/pixel')
header["cd1_2"] = 0
header["cd2_1"] = 0
header["cd2_2"] = 1
header["bunit"] = "ELECTRONS/SECOND"
info("############ Final reduced file: {0}_{1}_eps.fits".format(outname,band))
IO.writefits(output, maskname, "{0}_{1}_eps.fits".format(outname,
band), options, overwrite=True, header=header,
lossy_compress=False)
header["bunit"] = ""
IO.writefits(snrs, maskname, "{0}_{1}_snrs.fits".format(outname,
band), options, overwrite=True, header=header,
lossy_compress=False)
header["bunit"] = "ELECTRONS/SECOND"
IO.writefits(sdout/itout, maskname, "{0}_{1}_sig.fits".format(outname,
band), options, overwrite=True, header=header,
lossy_compress=False)
header["bunit"] = "SECOND"
IO.writefits(itout, maskname, "{0}_{1}_itime.fits".format(outname,
band), options, overwrite=True, header=header,
lossy_compress=False)
def r_interpol(ls, ss, lfid, tops, top, shift_pix=0, pad=[0,0], fill_value=0.0):
'''
Interpolate the data ss(ls, fs) onto a fiducial wavelength vector.
ls[n_spatial, n_lam] - wavelength array
ss[n_spatial, n_lam] - corresponding data array
lfid[n_lam] - wavelength fiducial to interpolate onto
shift_pix - # of pixels to shift in spatial direction
pad - # of pixels to pad in spatial direction
fill_value - passed through to interp1d
'''
S = ss.shape
output = np.zeros((np.int(S[0]+pad[0]+pad[1]), len(lfid)))
output[:] = np.nan
L = np.double(len(lfid))
# First interpolate onto a common wavelength grid
for i in range(S[0]):
ll = ls[i,:]
sp = ss[i,:]
ok = np.where(ll>1000)[0]
if len(ok) >= 100:
f = II.interp1d(ll[ok], sp[ok], bounds_error=False,
fill_value = fill_value)
output[i,:] = f(lfid)
# Now rectify in spatial
vert_shift = tops-top-shift_pix
f = II.interp1d(ls[10, :], vert_shift, bounds_error=False,
fill_value = fill_value)
for i in range(output.shape[1]):
to_shift = f(fidl[i])
x = np.arange(output.shape[0])
y = II.interp1d(x, output[:, i], bounds_error=False,
fill_value=fill_value)
output[:,i] = y(x + to_shift)
return output
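# Sketch (illustrative only): the fiducial wavelength grid that r_interpol
# resamples onto is a linear grid spanning the band's half-power points at the
# nominal grating dispersion, exactly as built in handle_rectification above.
def _example_fiducial_grid(band_name):
    dlambda = Wavelength.grating_results(band_name)
    hpp = Filters.hpp[band_name]
    return np.arange(hpp[0], hpp[1], dlambda)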
def handle_rectification_helper(edgeno):
''' All the rectification happens in this helper function. This helper function
is spawned as a separate process in the multiprocessing pool'''
global edges, dats, vars, itimes, shifts, lambdas, band, fidl,all_shifts
pix = np.arange(2048)
edge = edges[edgeno]
info("Handling edge: "+str(edge["Target_Name"]))
tops = edge["top"](pix)
bots = edge["bottom"](pix)
# Length of the slit in arcsecond
lenas = (tops[1024] - bots[1024]) * 0.18
mxshift = np.abs(np.int(np.ceil(np.max(all_shifts)/0.18)))
mnshift = np.abs(np.int(np.floor(np.min(all_shifts)/0.18)))
top = int(min(np.floor(np.min(tops)), 2048))
bot = int(max(np.ceil(np.max(bots)), 0))
ll = lambdas[1].data[bot:top, :]
eps = dats[1][bot:top, :].filled(0.0)
vv = vars[1][bot:top, :].filled(np.inf)
it = itimes[1][bot:top, :].filled(0.0)
lmid = ll[ll.shape[0]//2,:]
hpp = Filters.hpp[band]
minl = lmid[0] if lmid[0]>hpp[0] else hpp[0]
maxl = lmid[-1] if lmid[-1]<hpp[1] else hpp[1]
epss = []
ivss = []
itss = []
    if len(shifts) == 1: sign = 1
else: sign = -1
for shift in shifts:
output = r_interpol(ll, eps, fidl, tops, top, shift_pix=shift/0.18,
pad=[mnshift, mxshift], fill_value = np.nan)
epss.append(sign * output)
ivar = 1/vv
bad = np.where(np.isfinite(ivar) ==0)
ivar[bad] = 0.0
output = r_interpol(ll, ivar, fidl, tops, top, shift_pix=shift/0.18,
pad=[mnshift, mxshift], fill_value=np.nan)
ivss.append(output)
output = r_interpol(ll, it, fidl, tops, top, shift_pix=shift/0.18,
pad=[mnshift, mxshift], fill_value=np.nan)
itss.append(output)
sign *= -1
# the "mean of empty slice" warning are generated at the top and bottom edges of the array
# where there is basically no data due to the shifts between a and b positions
# we could pad a little bit less, or accept the fact that the slits have a couple of rows of
# nans in the results.
warnings.filterwarnings('ignore','Mean of empty slice')
it_img = np.nansum(np.array(itss), axis=0)
eps_img = np.nanmean(epss, axis=0)
warnings.filterwarnings('always')
# Remove any NaNs or infs from the variance array
ivar_img = []
for ivar in ivss:
bad = np.where(np.isfinite(ivar) == 0)
ivar[bad] = 0.0
ivar_img.append(ivar)
IV = np.array(ivar_img)
bad = np.isclose(IV,0)
IV[bad] = np.inf
var_img = np.nanmean(1/np.array(IV), axis=0)
sd_img = np.sqrt(var_img)
return {"eps_img": eps_img, "sd_img": sd_img, "itime_img": it_img,
"lambda": fidl, "Target_Name": edge["Target_Name"],
"slitno": edgeno+1, "offset": np.max(tops-top)}
| 14,322 | 31.478458 | 96 | py |
MosfireDRP | MosfireDRP-master/MOSFIRE/nmpfit_mos.py | """
Python/Numeric version of this module was called mpfit. This version was modified to use numpy.
"""
from __future__ import division # confidence medium
__version__ = '0.2'
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by XXX.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
        # If fjac==None then partial derivatives should not be
        # computed.  It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
        # stop the calculation.
status = 0
        return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
        # If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
        # Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
        if fjac is not None:
pderiv = Numeric.zeros([len(x), len(p)], Numeric.Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
        return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
           for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
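As a further illustration only, the second parameter could be bounded
on both sides (for example, between 0 and 10) like this:
   parinfo[1]['limited'] = [1,1]
   parinfo[1]['limits']  = [0., 10.]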
EXAMPLE
import mpfit
import numpy
x = numpy.arange(100, dtype=numpy.float64) + 1.  # start at 1 so log(x) is finite
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = (p0[0] + p0[1]*x + p0[2]*x**2 + p0[3]*numpy.sqrt(x) +
     p0[4]*numpy.log(x))
err = numpy.ones(100)
fa = {'x':x, 'y':y, 'err':err}
m = mpfit.mpfit(myfunct, p0, functkw=fa)
print('status = ', m.status)
if (m.status <= 0): print('error message = ', m.errmsg)
print('parameters = ', m.params)
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
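For reference, a user function consistent with this example might be
written as the following sketch (illustrative only; it assumes the
weighted-residual convention used throughout this module):
   def myfunct(p, fjac=None, x=None, y=None, err=None):
       model = (p[0] + p[1]*x + p[2]*x**2 + p[3]*numpy.sqrt(x) +
                p[4]*numpy.log(x))
       status = 0  # non-negative means the evaluation succeeded
       return [status, (y - model)/err]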
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be Taylor expanded about x0 as follows:
   f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)    (1)
where the three terms are the zeroth-, first- and second-order
contributions, respectively.
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg-Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, even if only by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions m x n), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
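As a purely illustrative sketch (not part of MPFIT itself), a single
undamped Gauss-Newton step of eqn (5) could be computed with numpy,
given a residual vector h (length m) and a Jacobian J (m x n), as:
   dx = numpy.linalg.solve(J.T.dot(J), -J.T.dot(h))
MPFIT itself works from the QR factorization of the Jacobian as
described above, which avoids explicitly forming J.T.dot(J) and is
numerically more stable.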
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
"""
import MOSFIRE.numerixenv as numerixenv
numerixenv.check()
import numpy
import types
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit (object):
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
fastnorm=0, rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
fastnorm:
Set this keyword to select a faster algorithm to compute sum-of-square
values internally. For systems with large numbers of data points, the
standard algorithm can become prohibitively slow because it cannot be
vectorized well. By setting this keyword, MPFIT will run faster, but
it will be more prone to floating point overflows and underflows. Thus, setting
this keyword may sacrifice some stability in the fitting process.
Default: clear (=0)
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return a
status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine be called.
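For illustration, a minimal user-defined iterfunct matching the
declaration above might look like this sketch (names are arbitrary):
   def myiter(myfunct, p, iter, fnorm, functkw=None,
              parinfo=None, quiet=0, dof=None):
       if not quiet:
           print(iter, fnorm, p)
       return 0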
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/numpy.sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the square root of the reduced chi-squared value, as in the
example below.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * numpy.sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.fastnorm = fastnorm
self.nfev = 0
self.damp = damp
self.machar = machar(double=1)
machep = self.machar.machep
if (fcn==None):
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if (iterfunct == 'default'): iterfunct = self.defiter
## Parameter damping doesn't work when user is providing their own
## gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
## Parameters can either be stored in parinfo, or x. x takes precedence if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
## Be sure that PARINFO is of the right type
if (parinfo is not None):
if (type(parinfo) != list):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if (type(parinfo[0]) != dict):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
## If the parameters were not specified at the command line, then
## extract them from PARINFO
if (xall is None):
xall = self.parinfo(parinfo, 'value')
if (xall is None):
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
## Make sure parameters are numpy arrays of type numpy.float
#print('xall', xall, type(xall))
xall = numpy.asarray(xall, numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
## TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if (ptied[i] != ''): self.qanytied = 1
self.ptied = ptied
## FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[i] != '') ## Tied parameters are also effectively fixed
## Finite differencing step, absolute and relative, and sidedness of deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
## Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep * 0 ## Remove minstep for now!!
qmax = maxstep != 0
wh = numpy.nonzero(((qmin!=0.) & (qmax!=0.)) & (maxstep < minstep))
#check if it's 1d array?
if (len(wh[0]) > 0):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = numpy.nonzero((qmin!=0.) & (qmax!=0.))
qminmax = len(wh[0]) > 0
## Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
## Compose only VARYING parameters
self.params = xall ## self.params is the set of parameters to be returned
x = numpy.take(self.params, ifree) ## x is the set of free parameters
## LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0,0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0.,0.], n=npar)
if (limited is not None) and (limits is not None):
## Error checking on limits in parinfo
wh = numpy.nonzero((limited[:,0] & (xall < limits[:,0])) |
(limited[:,1] & (xall > limits[:,1])))
if (len(wh[0]) > 0):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
wh = numpy.nonzero((limited[:,0] & limited[:,1]) &
(limits[:,0] >= limits[:,1]) &
(pfixed == 0))
if (len(wh[0]) > 0):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
## Transfer structure values to local variables
qulim = numpy.take(limited[:,1], ifree)
ulim = numpy.take(limits [:,1], ifree)
qllim = numpy.take(limited[:,0], ifree)
llim = numpy.take(limits [:,0], ifree)
wh = numpy.nonzero((qulim!=0.) | (qllim!=0.))
if (len(wh[0]) > 0): qanylim = 1
else: qanylim = 0
else:
## Fill in local variables with dummy values
qulim = numpy.zeros(nfree, dtype=numpy.int8)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
## Check input parameters for errors
if ((n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0)
or (maxiter <= 0) or (factor <= 0)):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if (rescale != 0):
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if (len(diag) < n): return
wh = (numpy.nonzero(diag <= 0))[0]
if (len(wh) > 0): return
self.errmsg = ''
# Make sure x is a numpy array of type numpy.float
x = numpy.asarray(x, numpy.float64)
[self.status, fvec] = self.call(fcn, self.params, functkw)
if (self.status < 0):
self.errmsg = 'ERROR: first call to "'+str(fcn)+'" failed'
return
m = len(fvec)
if (m < n):
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.fnorm = self.enorm(fvec)
## Initialize the Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
## Beginning of the outer loop
while(1):
## If requested, call fcn to enable printing of iterates
numpy.put(self.params, ifree, x)
if (self.qanytied): self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if (((self.niter-1) % nprint) == 0):
mperr = 0
xnew0 = self.params.copy()
dof = max(len(fvec) - len(x), 0)
status = iterfunct(fcn, self.params, self.niter, self.fnorm**2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if (status is not None): self.status = status
## Check for user termination
if (self.status < 0):
self.errmsg = 'WARNING: premature termination by ' + str(iterfunct)
return
## If parameters were changed (grrr..) then re-tie
if (max(abs(xnew0-self.params)) > 0):
if (self.qanytied): self.params = self.tie(self.params, ptied)
x = numpy.take(self.params, ifree)
## Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if (fjac is None):
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
## Determine if any of the parameters are pegged at the limits
if (qanylim):
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)) )[0]
nupeg = len(whupeg)
## See if any "pegged" values should keep their derivatives
if (nlpeg > 0):
## Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum = numpy.sum(fvec * fjac[:,whlpeg[i]])
if (sum > 0): fjac[:,whlpeg[i]] = 0
if (nupeg > 0):
## Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum = numpy.sum(fvec * fjac[:,whupeg[i]])
if (sum < 0): fjac[:,whupeg[i]] = 0
## Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
## On the first iteration if "diag" is unspecified, scale
## according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if (self.niter == 1):
if ((rescale==0) or (len(diag) < n)):
diag = wa2.copy()
wh = (numpy.nonzero(diag == 0) )[0]
numpy.put(diag, wh, 1.)
## On the first iteration, calculate the norm of the scaled x
## and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if (delta == 0.): delta = factor
## Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j,lj]
if (temp3 != 0):
fj = fjac[j:,lj]
wj = wa4[j:]
## *** optimization wa4(j:*)
wa4[j:] = wj - fj * numpy.sum(fj*wj) / temp3
fjac[j,lj] = wa1[j]
qtf[j] = wa4[j]
## From this point on, only the square matrix, consisting of the
## triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:,i] = fjac[:, ipvt[i]]
fjac = temp.copy()
## Check for overflow. This should be a cheap test here since FJAC
## has been reduced to a (small) square matrix, and the test is
## O(N^2).
#wh = where(finite(fjac) EQ 0, ct)
#if ct GT 0 then goto, FAIL_OVERFLOW
## Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if (self.fnorm != 0):
for j in range(n):
l = ipvt[j]
if (wa2[l] != 0):
sum = numpy.sum(fjac[0:j+1,j]*qtf[0:j+1])/self.fnorm
gnorm = max([gnorm,abs(sum/wa2[l])])
## Test for convergence of the gradient norm
if (gnorm <= gtol):
self.status = 4
return
## Rescale if necessary
if (rescale == 0):
diag = numpy.choose(diag>wa2, (wa2, diag))
## Beginning of the inner loop
while(1):
## Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
## Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
## No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
## Respect the limits. If a step were to go out of bounds, then
## we should take a step in the same direction but shorter distance.
## The step should take us right to the limit in that case.
alpha = 1.
if (qanylim):
## Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if (nlpeg > 0):
numpy.put(wa1, whlpeg, numpy.clip(
numpy.take(wa1, whlpeg), 0., max(wa1)))
if (nupeg > 0):
numpy.put(wa1, whupeg, numpy.clip(
numpy.take(wa1, whupeg), min(wa1), 0.))
dwa1 = abs(wa1) > machep
whl = (numpy.nonzero(((dwa1!=0.) & qllim) & ((x + wa1) < llim)) )[0]
if (len(whl) > 0):
t = (((numpy.take(llim, whl) - numpy.take(x, whl)) /
numpy.take(wa1, whl)))
alpha = min(alpha, min(t))
whu = (numpy.nonzero(((dwa1!=0.) & qulim) & ((x + wa1) > ulim)) )[0]
if (len(whu) > 0):
t = ((numpy.take(ulim, whu) - numpy.take(x, whu)) /
numpy.take(wa1, whu))
alpha = min(alpha, min(t))
## Obey any max step values.
if (qminmax):
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((qmax != 0.) & (maxstep > 0)) )[0]
if (len(whmax) > 0):
mrat = max(numpy.take(nwa1, whmax) /
numpy.take(maxstep, whmax))
if (mrat > 1): alpha = alpha / mrat
## Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
## Adjust the final output values. If the step put us exactly
## on a boundary, make sure it is exact.
wh = (numpy.nonzero((qulim!=0.) & (wa2 >= ulim*(1-machep))) )[0]
if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(ulim, wh))
wh = (numpy.nonzero((qllim!=0.) & (wa2 <= llim*(1+machep))) )[0]
if (len(wh) > 0): numpy.put(wa2, wh, numpy.take(llim, wh))
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
## On the first iteration, adjust the initial step bound
if (self.niter == 1): delta = min([delta,pnorm])
numpy.put(self.params, ifree, wa2)
## Evaluate the function at x+p and calculate its norm
mperr = 0
catch_msg = 'calling '+str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if (self.status < 0):
self.errmsg = 'WARNING: premature termination by "'+str(fcn)+'"'
return
fnorm1 = self.enorm(wa4)
## Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if ((0.1 * fnorm1) < self.fnorm): actred = - (fnorm1/self.fnorm)**2 + 1.
## Compute the scaled predicted reduction and the scaled directional
## derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1,j]*wa1[ipvt[j]]
## Remember, alpha is the fraction of the full LM step actually
## taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
## Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if (prered != 0): ratio = actred/prered
## Update the step bound
if (ratio <= 0.25):
if (actred >= 0): temp = .5
else: temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1): temp = 0.1
delta = temp*min([delta,pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
## Test for successful iteration
if (ratio >= 0.0001):
## Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
## Tests for convergence
if ((abs(actred) <= ftol) and (prered <= ftol)
and (0.5 * ratio <= 1)): self.status = 1
if delta <= xtol*xnorm: self.status = 2
if ((abs(actred) <= ftol) and (prered <= ftol)
and (0.5 * ratio <= 1) and (self.status == 2)): self.status = 3
if (self.status != 0): break
## Tests for termination and stringent tolerances
if (self.niter >= maxiter): self.status = 5
if ((abs(actred) <= machep) and (prered <= machep)
and (0.5*ratio <= 1)): self.status = 6
if delta <= machep*xnorm: self.status = 7
if gnorm <= machep: self.status = 8
if (self.status != 0): break
## End of inner loop. Repeat if iteration unsuccessful
if (ratio >= 0.0001): break
## Check for over/underflow - SKIP FOR NOW
##wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
##if ct GT 0 OR finite(ratio) EQ 0 then begin
## errmsg = ('ERROR: parameter or function value(s) have become '+$
## 'infinite# check model function for over- '+$
## 'and underflow')
## self.status = -16
## break
if (self.status != 0): break;
## End of outer loop.
catch_msg = 'in the termination phase'
## Termination, either normal or user imposed.
if (len(self.params) == 0):
return
if (nfree == 0): self.params = xall.copy()
else: numpy.put(self.params, ifree, x)
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if ((self.fnorm is not None) and (fnorm1 is not None)):
self.fnorm = max([self.fnorm, fnorm1])
self.fnorm = self.fnorm**2.
self.covar = None
self.perror = None
## (very carefully) set the covariance matrix COVAR
if ((self.status > 0) and (nocovar==0) and (n is not None)
and (fjac is not None) and (ipvt is not None)):
sz = numpy.shape(fjac)
if ((n > 0) and (sz[0] >= n) and (sz[1] >= n)
and (len(ipvt) >= n)):
catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n,0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
## Fill in actual covariance matrix, accounting for fixed
## parameters.
self.covar = numpy.zeros([nn, nn], numpy.float)
for i in range(n):
indices = ifree+ifree[i]*nn
numpy.put(self.covar, indices, cv[:,i])
#numpy.put(self.covar, i, cv[:,i])
## Compute errors in parameters
catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, numpy.float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0) )[0]
if len(wh) > 0:
numpy.put(self.perror, wh, numpy.sqrt(numpy.take(d, wh)))
return
## Default procedure to be called every iteration. It simply prints
## the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if (self.debug): print('Entering defiter...')
if (quiet): return
if (fnorm is None):
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
## Determine which parameters to print
nprint = len(x)
print("Iter ", ('%6i' % iter)," CHI-SQUARE = ",('%.10g' % fnorm)," DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if (iprint):
print(p + (pformat % x[i]) + ' ')
return(0)
## DO_ITERSTOP:
## if keyword_set(iterstop) then begin
## k = get_kbrd(0)
## if k EQ string(byte(7)) then begin
## message, 'WARNING: minimization not complete', /info
## print, 'Do you want to terminate this procedure? (y/n)', $
## format='(A,$)'
## k = ''
## read, k
## if strupcase(strmid(k,0,1)) EQ 'Y' then begin
## message, 'WARNING: Procedure is terminating.', /info
## mperr = -1
## endif
## endif
## endif
## Procedure to parse the parameter values in PARINFO, which is a list of dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if (self.debug): print('Entering parinfo...')
if (n == 0) and (parinfo is not None): n = len(parinfo)
if (n == 0):
values = default
return(values)
values = []
for i in range(n):
if ((parinfo is not None) and (key in parinfo[i])):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if (type(default) == list): test=default[0]
if (type(test) == int):
values = numpy.asarray(values, dtype=numpy.int)
elif (type(test) == float):
values = numpy.asarray(values, dtype=numpy.float)
return(values)
## Call user function or procedure, with _EXTRA or not, with
## derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if (self.debug): print('Entering call...')
if (self.qanytied): x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if (fjac is None):
[status, f] = fcn(x, fjac=fjac, **functkw)
if (self.damp > 0):
## Apply the damping if requested. This replaces the residuals
## with their hyperbolic tangent. Thus residuals larger than
## DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return([status, f])
else:
return(fcn(x, fjac=fjac, **functkw))
def enorm(self, vec):
if (self.debug): print('Entering enorm...')
## NOTE: it turns out that, for systems that have a lot of data
## points, this routine is a big computing bottleneck. The extended
## computations that need to be done cannot be effectively
## vectorized. The introduction of the FASTNORM configuration
## parameter allows the user to select a faster routine, which is
## based on TOTAL() alone.
# Very simple-minded sum-of-squares
if (self.fastnorm):
ans = numpy.sqrt(numpy.sum(vec*vec))
else:
agiant = self.machar.rgiant / len(vec)
adwarf = self.machar.rdwarf * len(vec)
## This is hopefully a compromise between speed and robustness.
## Need to do this because of the possibility of over- or underflow.
mx = max(vec)
mn = min(vec)
mx = max(abs(mx), abs(mn))
if mx == 0: return(vec[0]*0.)
if mx > agiant or mx < adwarf:
ans = mx * numpy.sqrt(numpy.sum((vec/mx)*(vec/mx)))
else:
ans = numpy.sqrt(numpy.sum(vec*vec))
return(ans)
def fdjac2(self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if (self.debug): print('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None: epsfcn = machep
if xall is None: xall = x
if ifree is None: ifree = numpy.arange(len(xall))
if step is None: step = x * 0.
nall = len(xall)
eps = numpy.sqrt(max([epsfcn, machep]))
m = len(fvec)
n = len(x)
## Compute analytical derivative if requested
if (autoderivative == 0):
mperr = 0
fjac = numpy.zeros(nall, numpy.float)
numpy.put(fjac, ifree, 1.0) ## Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m*nall:
print('ERROR: Derivative matrix was not computed properly.')
return(None)
## This definition is consistent with CURVEFIT
## Sign error found (thanks Jesus Fernandez <fernande@irm.chu-caen.fr>)
fjac.shape = [m,nall]
fjac = -fjac
## Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:,ifree]
fjac.shape = [m, n]
return(fjac)
fjac = numpy.zeros([m, n], numpy.float)
h = eps * abs(x)
## if STEP is given, use that
if step is not None:
stepi = numpy.take(step, ifree)
wh = (numpy.nonzero(stepi > 0) )[0]
if (len(wh) > 0): numpy.put(h, wh, numpy.take(stepi, wh))
## if relative step is given, use that
if (len(dstep) > 0):
dstepi = numpy.take(dstep, ifree)
wh = (numpy.nonzero(dstepi > 0) )[0]
if len(wh) > 0: numpy.put(h, wh, abs(numpy.take(dstepi,wh)*numpy.take(x,wh)))
## In case any of the step values are zero
wh = (numpy.nonzero(h == 0) )[0]
if len(wh) > 0: numpy.put(h, wh, eps)
## Reverse the sign of the step if we are up against the parameter
## limit, or if the user requested it.
#mask = dside == -1
mask = numpy.take((dside == -1), ifree)
if len(ulimited) > 0 and len(ulimit) > 0:
#mask = mask or (ulimited and (x > ulimit-h))
mask = mask | (ulimited & (x > ulimit-h))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0: numpy.put(h, wh, -numpy.take(h, wh))
## Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if (status < 0): return(None)
if abs(dside[j]) <= 1:
## COMPUTE THE ONE-SIDED DERIVATIVE
## Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fvec)/h[j]
else:
## COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if (status < 0): return(None)
## Note optimization fjac(0:*,j)
fjac[0:,j] = (fp-fm)/(2*h[j])
return(fjac)
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
#           i - (1/u(k)) * u * transpose(u)
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident ## identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 ## extract reflector
# hh = hh ## (ident - 2*(v # v)/total(v * v)) ## generate matrix
# endfor
#
# Test the result:
# IDL> print, hh ## transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
def qrfac(self, a, pivot=0):
if (self.debug): print('Entering qrfac...')
machep = self.machar.machep
sz = numpy.shape(a)
m = sz[0]
n = sz[1]
## Compute the initial column norms and initialize arrays
acnorm = numpy.zeros(n, numpy.float)
for j in range(n):
acnorm[j] = self.enorm(a[:,j])
rdiag = acnorm.copy()
wa = rdiag.copy()
ipvt = numpy.arange(n)
## Reduce a to r with householder transformations
minmn = min([m,n])
for j in range(minmn):
if (pivot != 0):
## Bring the column of largest norm into the pivot position
rmax = max(rdiag[j:])
kmax = (numpy.nonzero(rdiag[j:] == rmax) )[0]
ct = len(kmax)
kmax = kmax + j
if ct > 0:
kmax = kmax[0]
## Exchange rows via the pivot only. Avoid actually exchanging
## the rows, in case there is lots of memory transfer. The
## exchange occurs later, within the body of MPFIT, after the
## extraneous columns of the matrix have been shed.
if kmax != j:
temp = ipvt[j] ; ipvt[j] = ipvt[kmax] ; ipvt[kmax] = temp
rdiag[kmax] = rdiag[j]
wa[kmax] = wa[j]
## Compute the householder transformation to reduce the jth
## column of A to a multiple of the jth unit vector
lj = ipvt[j]
ajj = a[j:,lj]
ajnorm = self.enorm(ajj)
if ajnorm == 0: break
if a[j,j] < 0: ajnorm = -ajnorm
ajj = ajj / ajnorm
ajj[0] = ajj[0] + 1
#           transpose(p) * (transpose(jac) * jac) * p = transpose(r) * r,
## Apply the transformation to the remaining columns
## and update the norms
## NOTE to SELF: tried to optimize this by removing the loop,
## but it actually got slower. Reverted to "for" loop to keep
## it simple.
if (j+1 < n):
for k in range(j+1, n):
lk = ipvt[k]
ajk = a[j:,lk]
## *** Note optimization a(j:*,lk)
## (corrected 20 Jul 2000)
if a[j,lj] != 0:
a[j:,lk] = ajk - ajj * numpy.sum(ajk*ajj)/a[j,lj]
if ((pivot != 0) and (rdiag[k] != 0)):
temp = a[j,lk]/rdiag[k]
rdiag[k] = rdiag[k] * numpy.sqrt(max((1.-temp**2), 0.))
temp = rdiag[k]/wa[k]
if ((0.05*temp*temp) <= machep):
rdiag[k] = self.enorm(a[j+1:,lk])
wa[k] = rdiag[k]
rdiag[j] = -ajnorm
return([a, ipvt, rdiag, acnorm])
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
#           r*z = transpose(q)*b ,    transpose(p)*d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
#           transpose(p)*(transpose(a)*a + d*d)*p = transpose(s)*s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def qrsolv(self, r, ipvt, diag, qtb, sdiag):
if (self.debug): print('Entering qrsolv...')
sz = numpy.shape(r)
m = sz[0]
n = sz[1]
## copy r and (q transpose)*b to preserve input and initialize s.
## in particular, save the diagonal elements of r in x.
for j in range(n):
r[j:n,j] = r[j,j:n]
x = numpy.diagonal(r)
wa = qtb.copy()
## Eliminate the diagonal matrix d using a givens rotation
for j in range(n):
l = ipvt[j]
if (diag[l] == 0): break
sdiag[j:] = 0
sdiag[j] = diag[l]
## The transformations to eliminate the row of d modify only a
## single element of (q transpose)*b beyond the first n, which
## is initially zero.
qtbpj = 0.
for k in range(j,n):
if (sdiag[k] == 0): break
if (abs(r[k,k]) < abs(sdiag[k])):
cotan = r[k,k]/sdiag[k]
sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
cosine = sine*cotan
else:
tang = sdiag[k]/r[k,k]
cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
sine = cosine*tang
## Compute the modified diagonal element of r and the
## modified element of ((q transpose)*b,0).
r[k,k] = cosine*r[k,k] + sine*sdiag[k]
temp = cosine*wa[k] + sine*qtbpj
qtbpj = -sine*wa[k] + cosine*qtbpj
wa[k] = temp
## Accumulate the transformation in the row of s
if (n > k+1):
temp = cosine*r[k+1:n,k] + sine*sdiag[k+1:n]
sdiag[k+1:n] = -sine*r[k+1:n,k] + cosine*sdiag[k+1:n]
r[k+1:n,k] = temp
sdiag[j] = r[j,j]
r[j,j] = x[j]
## Solve the triangular system for z. If the system is singular
## then obtain a least squares solution
nsing = n
wh = (numpy.nonzero(sdiag == 0) )[0]
if (len(wh) > 0):
nsing = wh[0]
wa[nsing:] = 0
if (nsing >= 1):
wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1] ## Degenerate case
## *** Reverse loop ***
for j in range(nsing-2,-1,-1):
sum = numpy.sum(r[j+1:nsing,j]*wa[j+1:nsing])
wa[j] = (wa[j]-sum)/sdiag[j]
## Permute the components of z back to components of x
x = numpy.copy(x)
numpy.put(x, ipvt, wa)
return(r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
#           transpose(p)*(transpose(a)*a + par*d*d)*p = transpose(s)*s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
if (self.debug): print('Entering lmpar...')
dwarf = self.machar.minnum
sz = numpy.shape(r)
m = sz[0]
n = sz[1]
## Compute and store in x the gauss-newton direction. If the
## jacobian is rank-deficient, obtain a least-squares solution
nsing = n
wa1 = qtb.copy()
wh = (numpy.nonzero(numpy.diagonal(r) == 0) )[0]
if len(wh) > 0:
nsing = wh[0]
wa1[wh[0]:] = 0
if nsing > 1:
## *** Reverse loop ***
for j in range(nsing-1,-1,-1):
wa1[j] = wa1[j]/r[j,j]
if (j-1 >= 0):
wa1[0:j] = wa1[0:j] - r[0:j,j]*wa1[j]
## Note: ipvt here is a permutation array
numpy.put(x, ipvt, wa1)
## Initialize the iteration counter. Evaluate the function at the
## origin, and test for acceptance of the gauss-newton direction
iter = 0
wa2 = diag * x
dxnorm = self.enorm(wa2)
fp = dxnorm - delta
if (fp <= 0.1*delta):
return[r, 0., x, sdiag]
## If the jacobian is not rank deficient, the newton step provides a
## lower bound, parl, for the zero of the function. Otherwise set
## this bound to zero.
parl = 0.
if nsing >= n:
wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm
wa1[0] = wa1[0] / r[0,0] ## Degenerate case
for j in range(1,n): ## Note "1" here, not zero
sum = numpy.sum(r[0:j,j]*wa1[0:j])
wa1[j] = (wa1[j] - sum)/r[j,j]
temp = self.enorm(wa1)
parl = ((fp/delta)/temp)/temp
## Calculate an upper bound, paru, for the zero of the function
for j in range(n):
sum = numpy.sum(r[0:j+1,j]*qtb[0:j+1])
wa1[j] = sum/diag[ipvt[j]]
gnorm = self.enorm(wa1)
paru = gnorm/delta
if paru == 0: paru = dwarf/min([delta,0.1])
## If the input par lies outside of the interval (parl,paru), set
## par to the closer endpoint
par = max([par,parl])
par = min([par,paru])
if par == 0: par = gnorm/dxnorm
        ## Beginning of an iteration
while(1):
iter = iter + 1
## Evaluate the function at the current value of par
if par == 0: par = max([dwarf, paru*0.001])
temp = numpy.sqrt(par)
wa1 = temp * diag
[r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
wa2 = diag*x
dxnorm = self.enorm(wa2)
temp = fp
fp = dxnorm - delta
if ((abs(fp) <= 0.1*delta) or
((parl == 0) and (fp <= temp) and (temp < 0)) or
(iter == 10)): break;
## Compute the newton correction
wa1 = numpy.take(diag, ipvt)*numpy.take(wa2, ipvt)/dxnorm
for j in range(n-1):
wa1[j] = wa1[j]/sdiag[j]
wa1[j+1:n] = wa1[j+1:n] - r[j+1:n,j]*wa1[j]
wa1[n-1] = wa1[n-1]/sdiag[n-1] ## Degenerate case
temp = self.enorm(wa1)
parc = ((fp/delta)/temp)/temp
## Depending on the sign of the function, update parl or paru
if fp > 0: parl = max([parl,par])
if fp < 0: paru = min([paru,par])
## Compute an improved estimate for par
par = max([parl, par+parc])
## End of an iteration
## Termination
return[r, par, x, sdiag]
## Procedure to tie one parameter to another.
def tie(self, p, ptied=None):
if (self.debug): print('Entering tie...')
if (ptied is None): return
for i in range(len(ptied)):
if ptied[i] == '': continue
cmd = 'p[' + str(i) + '] = ' + ptied[i]
exec(cmd)
return(p)
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
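    # Hedged usage sketch (illustrative only, not executed; ``jac`` is a
    # stand-in name for the Jacobian at the best fit):
    #
    #     q, r = numpy.linalg.qr(jac)              # unpivoted QR, so ipvt defaults to arange(n)
    #     cov = self.calc_covar(r)                 # ~ inverse(jac.T * jac)
    #     perror = numpy.sqrt(numpy.diagonal(cov)) # 1-sigma parameter uncertainties
    #
    # The fit itself passes its own pivoted r and ipvt, which is the more
    # robust path when columns of the Jacobian are nearly degenerate.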
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if (self.debug): print('Entering calc_covar...')
if numpy.ndim(rr) != 2:
print('ERROR: r must be a two-dimensional matrix')
return(-1)
s = numpy.shape(rr)
n = s[0]
if s[0] != s[1]:
print('ERROR: r must be a square matrix')
return(-1)
if (ipvt is None): ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n,n]
        ## Form the inverse of r in the full upper triangle of r
l = -1
tolr = tol * abs(r[0,0])
for k in range(n):
if (abs(r[k,k]) <= tolr): break
r[k,k] = 1./r[k,k]
for j in range(k):
temp = r[k,k] * r[j,k]
r[j,k] = 0.
r[0:j+1,k] = r[0:j+1,k] - temp*r[0:j+1,j]
l = k
## Form the full upper triangle of the inverse of (r transpose)*r
## in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j,k]
r[0:j+1,j] = r[0:j+1,j] + temp*r[0:j+1,k]
temp = r[k,k]
r[0:k+1,k] = temp * r[0:k+1,k]
        ## Form the full lower triangle of the covariance matrix
        ## in the strict lower triangle of r and in wa
wa = numpy.repeat([r[0,0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing: r[i,j] = 0.
ii = ipvt[i]
if ii > jj: r[ii,jj] = r[i,j]
if ii < jj: r[jj,ii] = r[i,j]
wa[jj] = r[j,j]
## Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1,j] = r[j,0:j+1]
r[j,j] = wa[j]
return(r)
class machar(object):
def __init__(self, double=1):
if (double == 0):
self.machep = 1.19209e-007
self.maxnum = 3.40282e+038
self.minnum = 1.17549e-038
self.maxgam = 171.624376956302725
else:
self.machep = 2.2204460e-016
self.maxnum = 1.7976931e+308
self.minnum = 2.2250739e-308
self.maxgam = 171.624376956302725
self.maxlog = numpy.log(self.maxnum)
self.minlog = numpy.log(self.minnum)
self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
self.rgiant = numpy.sqrt(self.maxnum) * 0.1
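        # For double precision these hand-coded constants agree with what
        # numpy reports at runtime, e.g. (illustrative, not executed here):
        #
        #     finfo = numpy.finfo(numpy.float64)
        #     finfo.eps    # ~2.2204460e-16,  matches self.machep
        #     finfo.max    # ~1.7976931e+308, matches self.maxnum
        #     finfo.tiny   # ~2.2250739e-308, matches self.minnum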
| 96,849 | 41.552724 | 145 | py |
MosfireDRP | MosfireDRP-master/scripts/AutoDriver.py | #!/usr/local/bin/python
import MOSFIRE
from MOSFIRE import IO, Wavelength
from MOSFIRE.IO import fname_to_path
import os
try:
from astropy.io import fits as pf
except:
import pyfits as pf
import time
import sys
import glob
class Driver(object):
def __init__(self,outputFile,type):
self.outputFile = outputFile
self.type = type
self.offsetFiles = []
allowedTypes = ['slitmask', 'longslit', 'long2pos', 'long2pos_specphot']
if self.type not in allowedTypes:
print("Unknown driver type")
else:
print("Generating automatic driver file "+outputFile)
self.target = open(outputFile,'w')
self.import_section()
def addLine(self, line):
self.target.write(line+"\n")
def import_section(self):
self.addLine("import matplotlib")
self.addLine("matplotlib.use('TkAgg') # Force TkAgg backend for interactivity. This is")
self.addLine(" # critical to bypass a bug in the MacOSX backend.")
self.addLine("import os")
self.addLine("import time")
self.addLine("import logging")
self.addLine("logger = logging.getLogger(__name__)")
self.addLine("")
self.addLine("import MOSFIRE")
self.addLine("from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify, Wavelength, Extract")
self.addLine("from MOSFIRE.MosfireDrpLog import info, debug, warning, error")
self.addLine("")
self.addLine("import numpy as np")
self.addLine("np.seterr(all='ignore')")
# self.addLine("")
# self.addLine("from matplotlib import pyplot as pl")
# self.addLine("from astropy.io import fits as pf")
self.addLine("")
self.addLine("flatops = Options.flat")
self.addLine("waveops = Options.wavelength")
self.addLine("")
def addOffsetFiles(self,offsetFiles, resetList=False):
# might not be needed
if resetList:
self.offsetFiles = []
for offsetfile in offsetFiles:
self.offsetFiles.append(offsetfile)
def printObsfiles(self,obsfiles):
for obsfile in obsfiles:
self.addLine(obsfile)
self.addLine("")
def printnoninteractive(self,noninteractive=False):
self.addLine("#Set noninteractive to True to autofit wavelenth solution instead of manually fitting.")
self.addLine("noninteractiveflag="+str(noninteractive))
def printMaskAndBand(self):
offsetfile = self.offsetFiles[0]
fname = IO.list_file_to_strings(offsetfile)
if os.path.isabs(fname[0]):
path = fname[0]
else:
path = os.path.join(fname_to_path(fname[0]), fname[0])
hdulist = pf.open(path)
header = hdulist[0].header
self.maskName = header['maskname']
self.band = header['filter']
self.addLine("maskname = '"+str(self.maskName)+"'")
self.addLine("band = '"+str(self.band)+"'")
self.addLine("")
def isEmpty(self,file):
if not os.path.exists(file):
return True
fname = IO.list_file_to_strings(file)
if len(fname):
return False
else:
return True
def printFlat(self):
longslit=""
if self.type is 'long2pos' or self.type is 'long2pos_specphot' or self.type is 'longslit':
longslit=",longslit=longslit"
# using only Flat.txt
if os.path.isfile('Flat.txt'):
if self.isEmpty('Flat.txt') is True:
self.addLine("### WARNING: Flat.txt is empty! ###")
flatLine = "Flats.handle_flats('Flat.txt', maskname, band, flatops"+longslit+")"
# using both Flat.txt and FlatThermal.txt
        if os.path.isfile('FlatThermal.txt') and self.band == 'K':
if self.isEmpty('FlatThermal.txt') is True:
self.addLine("### WARNING: FlatThermal.txt is empty! ###")
flatLine = "Flats.handle_flats('Flat.txt', maskname, band, flatops,lampOffList='FlatThermal.txt'"+longslit+")"
# write the resulting line
self.addLine(flatLine)
self.addLine("")
def addLongslit(self):
if self.type is 'long2pos' or self.type is 'long2pos_specphot':
self.addLine("# Note: for long2pos, the row position is ignored, and the middle point of the slit is used")
self.addLine("longslit = {'yrange': [[1062,1188],[887,1010]], 'row_position': 0, 'mode':'long2pos'}")
if self.type is 'longslit':
# use the slitname to determine the range (such as LONGSLIT-3x0.7)
numberOfSlits = int(self.maskName.lstrip("LONGSLIT-").split("x")[0])
verticalOffset = 10 # this is the vertical offset to apply to each measurement to shift the position up in the detector. It seems to be around 10
slitSizePixels = int(numberOfSlits*(2048/46))
slitTop = 1024+slitSizePixels//2+verticalOffset
slitBottom = 1024-slitSizePixels//2+verticalOffset
RowPosition = 1024+verticalOffset
self.addLine("longslit = {'yrange':["+str(slitBottom)+","+str(slitTop)+"],'row_position':"+str(RowPosition)+",'mode':'longslit'}")
def printWavelengthFit(self):
if self.type is 'longslit' or self.type is 'long2pos':
addLongSlit = ",longslit=longslit"
else:
addLongSlit = ""
if self.type is 'slitmask' or self.type is 'longslit':
self.useNeon = False
self.useArgon = False
# determine is Argon and Neon files contain data for K bands
            if self.isEmpty('Ar.txt') is False and self.band == 'K':
self.useArgon = True
            if self.isEmpty('Ne.txt') is False and self.band == 'K':
self.useNeon = True
self.addLine("Wavelength.imcombine(obsfiles, maskname, band, waveops)")
if self.useArgon:
self.addLine("Wavelength.imcombine('Ar.txt', maskname, band, waveops)")
if self.useNeon:
self.addLine("Wavelength.imcombine('Ne.txt', maskname, band, waveops)")
self.addLine("Wavelength.fit_lambda_interactively(maskname, band, obsfiles,waveops"+addLongSlit+", noninteractive=noninteractiveflag)")
if self.useArgon:
self.addLine("Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ar.txt', argon=True)")
if self.useNeon:
self.addLine("Wavelength.apply_interactive(maskname, band, waveops, apply=obsfiles, to='Ne.txt', neon=True)")
self.addLine("Wavelength.fit_lambda(maskname, band, obsfiles, obsfiles,waveops"+addLongSlit+")")
if self.useArgon and self.useNeon:
self.addLine("Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt',waveops, wavenames2='Ar.txt'"+addLongSlit+")")
if self.useArgon and not self.useNeon:
self.addLine("Wavelength.fit_lambda(maskname, band, 'Ar.txt', 'Ar.txt',waveops"+addLongSlit+")")
if self.useNeon and not self.useArgon:
self.addLine("Wavelength.fit_lambda(maskname, band, 'Ne.txt', 'Ne.txt',waveops"+addLongSlit+")")
if self.useNeon or self.useArgon:
self.addLine("LROI = [[21000,22800]]*1")
if self.useNeon:
self.addLine("LROIs = Wavelength.check_wavelength_roi(maskname, band, obsfiles, 'Ne.txt', LROI, waveops)")
if self.useArgon and not self.useNeon:
self.addLine("LROIs = Wavelength.check_wavelength_roi(maskname, band, obsfiles, 'Ar.txt', LROI, waveops)")
self.addLine("Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops"+addLongSlit+")")
if self.useArgon and self.useNeon:
self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ne.txt', LROIs, waveops)")
if self.useArgon and not self.useNeon:
self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ar.txt', LROIs, waveops)")
if self.useNeon and not self.useArgon:
self.addLine("Wavelength.apply_lambda_sky_and_arc(maskname, band, obsfiles, 'Ne.txt', LROIs, waveops)")
            # determine wavelength name
files = IO.list_file_to_strings(self.offsetFiles)
if self.useNeon:
neon_files = IO.list_file_to_strings('Ne.txt')
self.waveName = "merged_lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")).rstrip(".fits")+"_and_"+str(Wavelength.filelist_to_wavename(neon_files, self.band, self.maskName,""))
elif self.useArgon and not self.useNeon:
argon_files = IO.list_file_to_strings('Ar.txt')
self.waveName = "merged_lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,"")).rstrip(".fits")+"_and_"+str(Wavelength.filelist_to_wavename(argon_files, self.band, self.maskName,""))
else:
self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,""))
if self.type is 'long2pos' or self.type is 'long2pos_specphot':
calibWith = ""
if self.isEmpty('Ar.txt') is False:
self.addLine("argon = ['Ar.txt']")
calibWith = "argon"
waveFiles = IO.list_file_to_strings('Ar.txt')
if self.isEmpty('Ne.txt') is False:
self.addLine("neon = ['Ne.txt']")
calibWith = "neon"
waveFiles = IO.list_file_to_strings('Ne.txt')
if calibWith:
# we have either Argon, or Neon, or both, so we can use arcs for the reduction
self.addLine("Wavelength.imcombine("+str(calibWith)+", maskname, band, waveops)")
self.addLine("Wavelength.fit_lambda_interactively(maskname, band, "+str(calibWith)+",waveops,longslit=longslit, "+str(calibWith)+"=True, noninteractive=noninteractiveflag)")
self.addLine("Wavelength.fit_lambda(maskname, band, "+str(calibWith)+","+str(calibWith)+",waveops,longslit=longslit)")
self.addLine("Wavelength.apply_lambda_simple(maskname, band, "+str(calibWith)+", waveops, longslit=longslit, smooth=True)")
self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(waveFiles, self.band, self.maskName,""))
else:
# we have no arcs. For the time being, we can try with sky lines but this only works with long2pos specphot
print("#####################################################################################################")
print("WARNING: There are no arc calibration files")
print(" The pipeline will try to use sky lines but this only works if the observation is long enough")
print(" and if you are only using long2pos. It will NOT work on long2pos_specphot")
print(" Please contact the MosfireDRP team to obtain a standard wavelength solution")
print("#####################################################################################################" )
self.addLine("obsfiles = obsfiles_posAnarrow + obsfiles_posCnarrow")
self.addLine("Wavelength.imcombine(obsfiles, maskname, band, waveops)")
self.addLine("Wavelength.fit_lambda_interactively(maskname, band, obsfiles ,waveops,longslit=longslit, noninteractive=noninteractiveflag)")
self.addLine("Wavelength.fit_lambda(maskname, band, obsfiles,obsfiles ,waveops,longslit=longslit)")
self.addLine("Wavelength.apply_lambda_simple(maskname, band, obsfiles, waveops, longslit=longslit, smooth=True)")
files = IO.list_file_to_strings(self.offsetFiles)
self.waveName = "lambda_solution_"+str(Wavelength.filelist_to_wavename(files, self.band, self.maskName,""))
self.addLine("")
self.addLine("Wavelength_file = '"+str(self.waveName)+"'")
self.addLine("")
def printBackground(self):
if self.type is 'long2pos_specphot':
for slit in ['posAnarrow','posCnarrow','posAwide','posCwide']:
self.addLine("Background.handle_background(obsfiles_"+str(slit)+",Wavelength_file,maskname,band,waveops, target=target_"+str(slit)+")")
if self.type is 'long2pos':
for slit in ['posAnarrow','posCnarrow']:
self.addLine("Background.handle_background(obsfiles_"+str(slit)+",Wavelength_file,maskname,band,waveops, target=target_"+str(slit)+")")
if self.type is 'slitmask':
self.addLine("Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops)")
if self.type is 'longslit':
self.addLine("Background.handle_background(obsfiles,Wavelength_file,maskname,band,waveops,target=target)")
self.addLine("")
def printRectification(self):
if self.type is 'slitmask':
self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles]')
self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops)')
if self.type is 'longslit':
self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles]')
self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles,waveops, target=target)')
if self.type is 'long2pos' or self.type is 'long2pos_specphot':
for slit in ['posAnarrow','posCnarrow']:
self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles_'+str(slit)+']')
self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_'+str(slit)+',waveops, target=target_'+str(slit)+')')
if self.type is 'long2pos_specphot':
for slit in ['posAwide','posCwide']:
self.addLine('redfiles = ["eps_" + file + ".fits" for file in obsfiles_'+str(slit)+']')
self.addLine('redfiles = [redfiles[0]]')
self.addLine('Rectify.handle_rectification(maskname, redfiles,Wavelength_file,band,obsfiles_'+str(slit)+',waveops, target=target_'+str(slit)+')')
self.addLine("")
def printExtraction(self):
if self.type is 'slitmask':
self.addLine('Extract.extract_spectra(maskname, band, width=10, interactive=(not noninteractiveflag))')
elif self.type is 'longslit':
self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target, interactive=(not noninteractiveflag))')
elif self.type is 'long2pos':
self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target_posAnarrow, interactive=(not noninteractiveflag))')
self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target_posCnarrow, interactive=(not noninteractiveflag))')
elif self.type is 'long2pos_specphot':
self.addLine('Extract.extract_spectra(maskname, band, width=10, target=target, interactive=(not noninteractiveflag))')
def printHeader(self):
now = time.strftime("%c")
self.addLine("# Driver file automatically generated on "+str(now))
self.addLine("")
self.addLine("# If you have questions, please submit a ticket on the github issue page:")
self.addLine("# https://github.com/Keck-DataReductionPipelines/MosfireDRP/issues")
self.addLine("# Alternatively, email the developers at mosfiredrp@gmail.com")
self.addLine("")
def CloseFile(self):
self.target.close()
def OffsetPairs():
offsetFiles = glob.glob("Offset_*.txt")
    # strip the Offset_ prefix and the .txt suffix
tmpOffsets = [off.replace("Offset_","").replace(".txt","") for off in offsetFiles]
if len(tmpOffsets) == 0:
return {}, ''
else:
# for each name, separate using _ as a separator
slitmaskOffset = []
processedTargets= []
targets_and_offsets= {}
for off in tmpOffsets:
# separate using _
off_array = off.split('_')
# the first value of the array is the offset value
# if the array has only one element, this is a slitmask (Offset_1.5.txt), add this value to the slitmask offsets
if len(off_array)==1:
type = 'slitmask'
if "slitmask" in targets_and_offsets:
tmp = targets_and_offsets["slitmask"]
tmp.append(float(off_array[0]))
targets_and_offsets["slitmask"]=tmp
else:
targets_and_offsets["slitmask"]=[float(off_array[0]),]
else:
# if the array has more than one element, we are in a long2pos or longslit mode
# if the last element is a PosC or PosA, then it's long2pos
#print off_array
if off_array[-1] in ['PosA','PosC']:
type = 'long2pos'
# check if we have already seen this target
tname = "_".join(off_array[1:-1])
# we are doing this for cases in which the file is Offset_-7_HIP87_7.25_PosA.txt (the _ in the file name is a problem)
else:
type = 'longslit'
tname = "_".join(off_array[1:])
if tname not in processedTargets:
# add the new target to the list
processedTargets.append(tname)
# add the current offset to the list of offsets files for this target
if tname in targets_and_offsets:
tmp=targets_and_offsets[tname]
tmp.append(float(off_array[0]))
#print("adding new offset to target "+str(tname))
targets_and_offsets[tname]=tmp
else:
#print("creating new offset set for target "+str(tname))
targets_and_offsets[tname]=[float(off_array[0]),]
return targets_and_offsets,type
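# Illustrative parsing, assuming a directory with these offset files:
#   'Offset_1.5.txt', 'Offset_-1.5.txt' -> {'slitmask': [1.5, -1.5]} (glob order), type 'slitmask'
#   'Offset_-7_HIP87_PosC.txt'          -> type 'long2pos', target 'HIP87', offset -7.0
#   'Offset_5_FS134.txt'                -> type 'longslit', target 'FS134', offset 5.0
# Note that a single 'type' is returned, so a directory that mixes slitmask
# and longslit Offset_*.txt files is classified by the last file parsed.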
def SetupFiles(target=None, offsets=None, type=None):
# convert numbers such as 1.0 to 1, but leaves 1.5 as 1.5
offsets = [int(f) if f % 1 ==0 else f for f in offsets]
setupLines = []
obsFiles = []
specphot = False
type=type
# slitmask
if type is 'slitmask':
offsets = [f for f in offsets if f>0]
for off in offsets:
obsFiles.append("Offset_"+str(off)+".txt")
obsFiles.append("Offset_"+str(off*-1)+".txt")
setupLines.append("obsfiles=['"+str("','".join(obsFiles))+"']")
elif type is 'longslit':
        # files are assumed to be in pairs, and we drop the "0" value if present.
# remove negative and 0 offsets
offsets = [f for f in offsets if f>0]
for off in offsets:
obsFiles.append("Offset_"+str(off)+"_"+str(target)+".txt")
obsFiles.append("Offset_"+str(off*-1)+"_"+str(target)+".txt")
setupLines.append("obsfiles=['"+str("','".join(obsFiles))+"']")
setupLines.append('target="'+str(target)+'"')
elif type is 'long2pos' or type is 'long2pos_specphot':
# old long 2 pos (-7,-14,-21, 7,14,21)
# narrow slits
if set([-7,-21,7,21]).issubset(offsets):
setupLines.append("obsfiles_posCnarrow = ['Offset_-21_"+str(target)+"_PosC.txt', 'Offset_-7_"+str(target)+"_PosC.txt']")
obsFiles.append("Offset_7_"+str(target)+"_PosA.txt") # we are using this to determine maskname and band
obsFiles.append("Offset_-7_"+str(target)+"_PosC.txt") # we are using this to determine maskname and band
setupLines.append('target_posCnarrow = "'+str(target)+'_POSC_NARROW"')
setupLines.append("IO.fix_long2pos_headers(obsfiles_posCnarrow)")
setupLines.append("obsfiles_posAnarrow = ['Offset_7_"+str(target)+"_PosA.txt', 'Offset_21_"+str(target)+"_PosA.txt']")
setupLines.append('target_posAnarrow = "'+str(target)+'_POSA_NARROW"')
setupLines.append("IO.fix_long2pos_headers(obsfiles_posAnarrow)")
# wide slits
if set([-7,-14,7,14]).issubset(offsets):
setupLines.append("obsfiles_posCwide = ['Offset_-14_"+str(target)+"_PosC.txt', 'Offset_-7_"+str(target)+"_PosC.txt']")
setupLines.append('target_posCwide = "'+str(target)+'_POSC_WIDE"')
setupLines.append("IO.fix_long2pos_headers(obsfiles_posCwide)")
setupLines.append("obsfiles_posAwide = ['Offset_14_"+str(target)+"_PosA.txt', 'Offset_-7_"+str(target)+"_PosA.txt']")
setupLines.append('target_posAwide = "'+str(target)+'_POSA_WIDE"')
setupLines.append("IO.fix_long2pos_headers(obsfiles_posAwide)")
specphot = True
# new long 2 pos (-7,0,7)
# narrow slits
        if set([-7,7]).issubset(offsets) and not(set([-21,21]).issubset(offsets)):
setupLines.append("obsfiles_posCnarrow = ['Offset_7_"+str(target)+"_PosC.txt', 'Offset_-7_"+str(target)+"_PosC.txt']")
obsFiles.append("Offset_7_"+str(target)+"_PosA.txt")
obsFiles.append("Offset_-7_"+str(target)+"_PosC.txt") # we are using this to determine maskname and band
setupLines.append('target_posCnarrow = "'+str(target)+'_POSC_NARROW"')
setupLines.append("obsfiles_posAnarrow = ['Offset_7_"+str(target)+"_PosA.txt', 'Offset_-7_"+str(target)+"_PosA.txt']")
setupLines.append('target_posAnarrow = "'+str(target)+'_POSA_NARROW"')
# wide slits
if set([-7,0,7]).issubset(offsets):
setupLines.append("obsfiles_posCwide = ['Offset_0_"+str(target)+"_PosC.txt', 'Offset_-7_"+str(target)+"_PosC.txt']")
setupLines.append('target_posCwide = "'+str(target)+'_POSC_WIDE"')
setupLines.append("obsfiles_posAwide = ['Offset_0_"+str(target)+"_PosA.txt', 'Offset_-7_"+str(target)+"_PosA.txt']")
setupLines.append('target_posAwide = "'+str(target)+'_POSA_WIDE"')
specphot=True
return setupLines, obsFiles, specphot
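# Illustrative call (file names follow the Offset_* convention used above):
#   SetupFiles('slitmask', [1.5, -1.5], 'slitmask') drops the non-positive
#   offsets and returns
#     setupLines == ["obsfiles=['Offset_1.5.txt','Offset_-1.5.txt']"]
#     obsFiles   == ['Offset_1.5.txt', 'Offset_-1.5.txt']
#     specphot   == False
# Whole-numbered offsets such as 7.0 are first collapsed to 7 so the file
# names match what 'mospy handle' writes (Offset_7_..., not Offset_7.0_...).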
#set noninteractive variable
if len(sys.argv) > 3:
print("Usage: mospy AutoDriver [True|False]")
sys.exit()
noninteractiveval=False
if len(sys.argv) == 3:
    if str(sys.argv[2]) in ("t", "T", "true", "True"):
noninteractiveval=True
    elif str(sys.argv[2]) in ("f", "F", "false", "False"):
noninteractiveval=False
else:
print("Usage: mospy AutoDriver [True|False]")
sys.exit()
targets_and_offsets,type = OffsetPairs()
if 'slitmask' in targets_and_offsets:
print("slitmask mode")
mydriver=Driver("Driver.py","slitmask")
mydriver.printHeader()
obsLines,obsFiles,specphot = SetupFiles('slitmask',targets_and_offsets['slitmask'],type)
mydriver.addOffsetFiles(obsFiles)
mydriver.printMaskAndBand()
mydriver.printnoninteractive(noninteractive=noninteractiveval)
mydriver.printObsfiles(obsLines)
mydriver.printFlat()
mydriver.printWavelengthFit()
mydriver.printBackground()
mydriver.printRectification()
mydriver.printExtraction()
mydriver.CloseFile()
elif type is 'long2pos' or type is 'longslit':
Targets = list(targets_and_offsets.keys())
for target in Targets:
print(str(type)+" mode")
obsLines,obsFiles,specphot = SetupFiles(target,targets_and_offsets[target],type)
if type is 'longslit':
mydriver=Driver("Longslit_"+str(target)+".py","longslit")
elif specphot:
mydriver=Driver("Long2pos_"+str(target)+".py","long2pos_specphot")
else:
mydriver=Driver("Long2pos_"+str(target)+".py","long2pos")
mydriver.printHeader()
mydriver.addOffsetFiles(obsFiles)
mydriver.printMaskAndBand()
mydriver.printnoninteractive(noninteractive=noninteractiveval)
mydriver.printObsfiles(obsLines)
mydriver.addLongslit()
mydriver.printFlat()
mydriver.printWavelengthFit()
mydriver.printBackground()
mydriver.printRectification()
mydriver.printExtraction()
mydriver.CloseFile()
else:
print('No data found in Offsets*txt files. No driver file generated')
| 24,849 | 51.536998 | 244 | py |
MosfireDRP | MosfireDRP-master/scripts/mospy_handle.py | #!/usr/local/bin/python
'''
MOSFIRE 'handle' command:
(c) npk - Dec 2013
'''
import MOSFIRE.IO as IO
import os
import numpy as np
import sys
import glob
from MOSFIRE.MosfireDrpLog import debug, info, warning, error
if len(sys.argv) < 3:
print('''Usage: mospy handle [target]''')
sys.exit()
## Output the file list to a text file for later examination
if os.path.exists('filelist.txt'):
debug('Removing old filelist.txt')
os.remove('filelist.txt')
fl = open('filelist.txt', 'w')
files = []
for i in range(1, len(sys.argv)):
files.extend(glob.iglob(os.path.abspath(sys.argv[i])))
files = [file for file in files if os.path.splitext(file)[1] != '.original']
masks = {}
info('Examining {} files'.format(len(files)))
for fname in files:
try:
header = IO.readheader(fname)
except IOError:#, err:
fl.write("Couldn't IO %s\n" % fname)
continue
except:
fl.write("%s is unreadable\n" % fname)
continue
lamps = ""
try:
if header["pwstata7"] == 1:
lamps += header["pwloca7"][0:2]
if header["pwstata8"] == 1:
lamps += header["pwloca8"][0:2]
except KeyError:
lamps = "???"
header['lamps'] = lamps
try:
if header["aborted"]:
header['object' ] = 'ABORTED'
except:
fl.write("Missing header file in: %s\n" % fname)
try:
fl.write("%(datafile)12s %(object)35s %(truitime)6.1fs %(maskname)35s %(lamps)3s %(filter)4s %(mgtname)7s\n" % (header))
except:
try:
fl.write("%(datafile)12s %(object)25s %(truitime)6.1fs %(lamps)3s %(filter)6s %(mgtname)7s\n" % (header))
except:
fl.write("%s Skipped\n" % fname)
continue
datafile = header['datafile'] + '.fits'
maskname = str(header['maskname'])
target = str(header['targname'])
filter = header['filter']
yr,mn,dy = IO.fname_to_date_tuple(datafile)
date = str(yr)+mn+str(dy)
object = header['object']
frameid = header['FRAMEID'].strip()
itime = header['truitime']
grating_turret = header['mgtname']
if object.find("MIRA") == -1:
mira = False
else:
mira = True
    if header['MGTNAME'] != 'mirror':
mira = False
if maskname.find(" (align)") == -1:
align = False
else:
maskname = maskname.replace(" (align)", "")
align = True
if maskname.find('LONGSLIT') != -1:
# print("longslit file")
align = False
if maskname.find('long2pos') != -1:
if grating_turret != 'mirror':
align = False
empty_files = {'Align': [], 'Ne': [], 'Ar': [], 'Flat': [], 'FlatThermal': [],
'Dark': [], 'Aborted': [], 'Image': [], 'MIRA': [], 'Unknown': []}
if maskname not in masks:
masks[maskname] = {date: {filter: empty_files}}
if date not in masks[maskname]:
masks[maskname][date] = {filter: empty_files}
if filter not in masks[maskname][date]:
masks[maskname][date][filter] = empty_files
# convert numbers such as 1.0 to 1, but leaves 1.5 as 1.5
# - added to match AutoDriver.py code
offset_hdr = float(header['YOFFSET'])
if offset_hdr % 1 == 0:
offsetvalue = int(offset_hdr)
else:
offsetvalue = offset_hdr
offset = 'Offset_' + str(offsetvalue)
if (maskname.find('long2pos') != -1 and align is False) or maskname.find('LONGSLIT') != -1:
# if the target name contains a /, replace it with _
target_name = target.replace("/","_")
# if the target name contains a space, remove it
target_name = target_name.replace(" ","")
# add a posC and posA to the offset names
position = ''
if header['XOFFSET']>0:
position = 'PosC'
if header['XOFFSET']<0:
position = 'PosA'
offset = offset+'_'+str(target_name)
        if position != '':
offset = offset+'_'+position
if mira:
masks[maskname][date][filter]['MIRA'].append(fname)
elif align:
masks[maskname][date][filter]['Align'].append(fname)
elif 'Ne' in header['lamps']:
masks[maskname][date][filter]['Ne'].append(fname)
elif 'Ar' in header['lamps']:
masks[maskname][date][filter]['Ar'].append(fname)
elif header['ABORTED']:
masks[maskname][date][filter]['Aborted'].append(fname)
elif header['FILTER'] == 'Dark':
masks[maskname][date][filter]['Dark'].append(fname)
elif header['FLATSPEC'] == 1:
masks[maskname][date][filter]['Flat'].append(fname)
    elif object.find("Flat:") != -1 and (object.find("lamps off") != -1 or object.find("Flat:Off") != -1):
masks[maskname][date][filter]['FlatThermal'].append(fname)
elif header['mgtname'] == 'mirror':
masks[maskname][date][filter]['Image'].append(fname)
elif offset != 0:
# print "offset is now:"+str(offset)
if frameid in ["A", "B", "A'", "B'","D","C", "E"]:
if offset in masks[maskname][date][filter]:
masks[maskname][date][filter][offset].append((fname, itime))
# print("adding file to existing offset file")
else:
masks[maskname][date][filter][offset] = [(fname, itime)]
# print("creating new offset file")
else:
fl.write('{} has unexpected FRAMEID: {}\n'.format(fname, frameid))
else:
masks[maskname][date][filter]['Unknown'].append(fname)
##### Now handle mask dictionary
def descriptive_blurb():
import getpass, time
uid = getpass.getuser()
date = time.asctime()
return "# Created by '%s' on %s\n" % (uid, date)
# Write out the list of files in filepath
# list = ['/path/to/mYYmmDD_####.fits' ...]
# filepath is absolute path to the file name to write to
#
# Result, is a file called filepath is written with
# fits files in the list.
def handle_file_list(output_file, files):
'''Write a list of paths to MOSFIRE file to output_file.'''
if os.path.isfile(output_file):
        print("%s: already exists, overwriting" % output_file)
# pass
if len(files) > 0:
        with open(output_file, "w") as f:
f.write(descriptive_blurb())
picker = lambda x: x
if len(files[0]) == 2: picker = lambda x: x[0]
# Identify unique path to files:
paths = [os.path.dirname(picker(file)) for file in files]
paths = list(set(paths))
if len(paths) == 1:
path_to_all = paths[0]
converter = os.path.basename
f.write("%s # Abs. path to files [optional]\n" % path_to_all)
else:
converter = lambda x: x
info('Writing {} files to {}'.format(len(files), output_file))
for path in files:
if len(path) == 2:
to_write = "%s # %s s\n" % (converter(path[0]), path[1])
else:
to_write = "%s\n" % converter(path)
f.write("%s" % to_write)
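# Example of the file this produces (paths, dates and times are invented):
#
#   # Created by 'observer' on Mon Jan  1 00:00:00 2018
#   /data/2018jan01 # Abs. path to files [optional]
#   m180101_0042.fits # 119.292 s
#   m180101_0043.fits # 119.292 s
#
# Lists without exposure times (e.g. Flat.txt) are written as bare file
# names, since their entries are plain strings rather than (name, itime)
# pairs.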
def handle_date_and_filter(mask, date, filter, mask_info):
path = os.path.join(mask,date,filter)
try:
os.makedirs(path)
except OSError:
pass
for type in list(mask_info.keys()):
handle_file_list(os.path.join(path, type + ".txt"), mask_info[type])
for mask in list(masks.keys()):
for date in list(masks[mask].keys()):
for filter in list(masks[mask][date].keys()):
handle_date_and_filter(mask, date, filter, masks[mask][date][filter])
| 7,660 | 29.644 | 128 | py |
MosfireDRP | MosfireDRP-master/scripts/mospy_what.py | #!/usr/local/bin/python
'''
MOSFIRE 'what' command:
Spits out an informative summary of files
in the current directory. Or files selected
via a glob.
i.e. what *0311.fits
npk - March 23 2011
'''
import MOSFIRE
import MOSFIRE.IO
import glob
import sys
files = []
if len(sys.argv) == 1:
files = glob.iglob('*')
else:
for i in range(1, len(sys.argv)):
files.extend(glob.iglob(sys.argv[i]))
#print("filename object exptime maskname lamp filt Turret")
for fname in files:
try:
header = MOSFIRE.IO.readheader(fname)
    except IOError as err:
print("Couldn't IO %s" % fname)
continue
except:
print("%s is unreadable" % fname)
continue
lamps = ""
try:
if header["pwstata7"] == 1:
lamps += header["pwloca7"][0:2]
if header["pwstata8"] == 1:
lamps += header["pwloca8"][0:2]
except KeyError:
lamps = "???"
    header['lamps'] = lamps
try:
if header["aborted"]:
            header['object'] = 'ABORTED'
except:
print("Missing header file in: %s" % fname)
try:
print("%(datafile)12s %(object)40s %(truitime)6.1f s %(maskname)35s %(lamps)3s %(filter)4s %(mgtname)7s" % (header))
except:
try:
print("%(datafile)12s %(object)25s %(truitime)6.1f s %(lamps)3s %(filter)6s %(mgtname)7s" % (header))
except:
print("%s Skipped" % fname)
| 1,491 | 21.268657 | 124 | py |
MosfireDRP | MosfireDRP-master/scripts/mospy_db.py |
import time
import traceback
import getpass
import os
import pdb
import pprint
import sqlite3
import sys
import textwrap
from operator import itemgetter
from itertools import groupby
import MOSFIRE
from MOSFIRE import Options, IO
def load_db():
indir = Options.indir
outname = os.path.join(Options.outdir, "mosfire_files.db")
print("Database: {0}".format(outname))
conn = sqlite3.connect(outname)
return conn
def create(cursor):
cursor.execute('''
CREATE TABLE if not exists files
(id integer primary key, path text, fdate text, number integer)
''')
keys = []
def append_column(cursor, name, typename):
qry = "alter table files\nadd {0} {1}".format(name, typename)
try:
cursor.execute(qry)
print("Added {0} as {1}".format(name, typename))
except sqlite3.OperationalError:
pass
def make():
"""Make the database"""
db = load_db()
c = db.cursor()
create(c)
dirs = os.walk(Options.indir)
Options.indir = Options.indir.rstrip("/")
for root, dirs, files in dirs:
if root == Options.indir: continue
ignore, path = root.split(Options.indir)
if len(path.split("/")) != 2: continue
try: date = int(path.split("/")[1][0:4])
except: continue
if (date < 2012) or (date > 2030): continue
for file in files:
if len(file) != 17: continue
p = os.path.join(root, file)
num = db.execute('select count(*) from files where path = "%s"' %
p).fetchall()
if num[0][0] > 0:
print("Skipping: " + p + " [already in db]")
continue
print(p)
hdr = IO.readheader(p)
try:
fdate = file.split("_")[0][1:]
number = file.split("_")[1][:-5]
except:
print("Skipping: " + p)
continue
insert_sql = "insert into files(path,fdate,number,"
vals = "?,?,?,"
values = [p, fdate, number]
for key in list(hdr.keys()):
if key == 'COMMENT': continue
value = hdr[key]
T = type(value)
key = key.replace("-","_")
insert_sql += key + ","
vals += "?,"
values.append(value)
if key in keys: continue
keys.append(key)
if T == int: typename = 'integer'
                elif T == float: typename = 'real'
else: typename = 'text'
append_column(c, key, typename)
insert_sql = insert_sql[:-1] + ") values (" + vals[:-1] + ")"
try:
c.execute(insert_sql, tuple(values))
except:
print("Query failed on:")
print(insert_sql)
traceback.print_exc()
#sys.exit()
db.commit()
def find_continuous(data):
'''Find all continuous numbers in a list'''
# http://stackoverflow.com/questions/2154249/identify-groups-of-continuous-numbers-in-a-list
ranges = []
    for k, g in groupby(enumerate(data), lambda ix: ix[0] - ix[1]):
group = list(map(itemgetter(1), g))
ranges.append((group[0], group[-1]))
return ranges
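# For example, find_continuous([2, 3, 4, 8, 9, 15]) returns
# [(2, 4), (8, 9), (15, 15)]: one (first, last) tuple per consecutive run
# of frame numbers.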
def underline_ansi(str):
return chr(27) + '[4m' + str + chr(27) + '[0m'
def bold_ansi(str):
return chr(27) + '[1m' + str + chr(27) + '[0m'
def boldunderline_ansi(str):
return chr(27) + '[1m' + chr(27) + '[4m' + str + chr(27) + '[0m'
def sql_for_mask_group_filter(db, maskname):
cur = db.execute(
'''
select count(filter), filter, itime/1000.0, yoffset
from files
where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy"
group by filter'''.
format(maskname))
return cur.fetchall()
def sql_for_mask_filter_flats(db, maskname, filter):
query = '''
select path, fdate, number
from files
where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy" and
filter = "{1}" and (el-45) < .1 and flatspec = 1
order by fdate, number
'''.format(maskname, filter)
print("Flat Query is:", query)
cursor = db.execute(query)
return cursor.fetchall()
def sql_for_mask_filter_spectra(db, maskname, filter):
query = '''
select fdate
from files
where maskname = "{0}" and substr(obsmode, -12, 12) = "spectroscopy" and
filter = "{1}" and (itime/1000.0) > 30 and flatspec = 0 and (domestat =
"tracking" or domestat = 0) and aborted = 0
group by fdate
'''.format(maskname, filter)
#print("DB Query is: ", query
cur = db.execute(query)
return cur.fetchall()
def sql_for_mask_filter_date(db, maskname, filter, date):
query = '''
select path, fdate, number, yoffset, itime/1000.0
from files
where maskname = "{0}" and filter = "{1}" and (itime/1000.0) > 30 and
fdate = {2} and flatspec = 0 and (domestat = "tracking" or
domestat = 0) and aborted = 0
order by fdate, number
'''.format(maskname, filter, date)
print("DB Query is: ", query)
cur = db.execute(query)
return cur.fetchall()
def plan_to_fname(plan):
return "%s_%s.py" % (plan["maskname"], plan["filter"])
longslit_plan_file ='''
# This file was automatically generated by the mospy db application
# The application was executed by {uid} on {createdate}
#
# Help, bugs to: http://mosfire.googlecode.com
#
# Instructions
# 1. edit band = 'fixme' to band = 'Y' or 'J' or 'H' or 'K'
# e.g. band = 'J'
# 2. edit range(a,b) to be a list of flat names
# 3. edit range(c,d) to be a list of long names
# Note for steps 2&3 most likely these will be a continuous sequence
# 4. edit [709, 1350] to be the pixel values at the beginning and end
# of the long slit. Look at the raw data.
import os, time
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify
from MOSFIRE import Wavelength, Longslit
import numpy as np
from matplotlib import pyplot as pl
try:
from astropy.io import fits as pf
except:
import pyfits as pf
np.seterr(all="ignore")
maskname = '{maskname}'
band = '{band}'
flatnames = {flatnames}
longnames = {longnames}
flatops = Options.flat
waveops = Options.wavelength
{lslitoptions}
Flats.handle_flats(flatnames, maskname, band, flatops)
Wavelength.imcombine(longnames, maskname, band, waveops)
Wavelength.fit_lambda_interactively(maskname, band, longnames, waveops)
Wavelength.fit_lambda(maskname, band, longnames, longnames, waveops,
longslit=longslit)
Wavelength.apply_lambda_simple(maskname, band, longnames, waveops,
longslit=longslit, smooth=True)
Longslit.go(maskname, band, longnames, waveops, longslit)
'''
plan_file ='''
# This file was automatically generated by the mospy db application
# The application was executed by {uid} on {createdate}
#
# Help, bugs to: http://mosfire.googlecode.com
import os, time
import MOSFIRE
from MOSFIRE import Background, Combine, Detector, Flats, IO, Options, Rectify
from MOSFIRE import Wavelength
import numpy as np
from matplotlib import pyplot as pl
try:
from astropy.io import fits as pf
except:
import pyfits as pf
np.seterr(all="ignore")
maskname = '{maskname}'
band = '{band}'
num_dates = {num_dates}
flatnames = {flatnames}
sciframes = {sciframes}
wavenames = {wavenames}
flatops = Options.flat
waveops = Options.wavelength
Flats.handle_flats(flatnames, maskname, band, flatops)
{wavecombine}
Combine.handle_combine(wavenames, maskname, band, waveops)
'''
def plan_to_python(plans):
'''Convert the python list/dictionary created by masks() into a python
script'''
'''
A plan is a structure that looks something like
Plan {
filter -> Filter name (string)
maskname -> maskname (string)
flatlist ->
["YYmmDD_####"...] list of flats
dates -> [{
date -> YYmmDD (string)
observations -> [{
observation -> (a,b) (tuple of file number range)
offsets [{
"name" -> ["YYmmDD_####" ...] list of sci frames at offset
"name"
}]
}]
}]
}
This function unpacks the above structure into a python program that will
produce a data reduction plan file.
'''
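    # A minimal concrete instance of the structure above (mask, dates and
    # file names are invented for illustration):
    #
    #   {'maskname': 'mymask', 'filter': 'J',
    #    'flatlist': ['m180101_0010.fits', 'm180101_0011.fits'],
    #    'dates': [{'date': '180101',
    #               'observations': [{'observation': (42, 49),
    #                                 'offsets': {1.5:  {'fname': [...], 'itime': 950.3, 'start/stop': (42, 49)},
    #                                             -1.5: {'fname': [...], 'itime': 950.3, 'start/stop': (42, 49)}}}]}]}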
for plan in plans:
fname = plan_to_fname(plan)
if os.path.exists(fname):
#print("Plan '%s' already exists, remove the plan file "
#"to overwrite" % fname)
os.remove(fname)
#REMOVE COMMENT BELOW:
#continue
outf = open(fname, "w")
num_dates = len(plan["dates"])
waves = []
scis = []
for date in plan["dates"]:
for observation in date["observations"]:
obs_wave = []
obs_sci = {}
offsets = list(observation["offsets"].keys())
if (len(offsets) == 1) and offsets[0] is 'Unknown':
fnames = observation["offsets"]['Unknown']['fname']
obs_sci["A"] = fnames[0:-1:2]
obs_sci["B"] = fnames[1:-1:2]
obs_wave.extend(fnames)
else:
for offset in offsets:
fnames = observation["offsets"][offset]['fname']
obs_sci[offset] = fnames
obs_wave.extend(fnames)
scis.append(obs_sci)
waves.append(obs_wave)
wavecombine = ""
for i in range(len(waves)):
wavecombine += "Wavelength.imcombine(wavenames[%i], maskname, " \
"band, waveops)\n" % (i)
if i == 0:
wavecombine += "Wavelength.fit_lambda_interactively(" \
"maskname, band, wavenames[0], waveops)\n"
wavecombine += "Wavelength.fit_lambda(" \
"maskname, band, wavenames[%i], wavenames[0], " \
" waveops)\n" % i
wavecombine += "Wavelength.apply_lambda_simple(maskname, band, " \
" wavenames[%i], waveops)\n" % i
pos = list(scis[i].keys())
if len(pos) != 2:
print("Only handling A/B subtraction currently")
continue
wavecombine += \
"Background.handle_background(sciframes[%i]['%s'], " \
"sciframes[%i]['%s'], wavenames[%i], maskname, band, " \
"waveops)\n" % (i, pos[0], i, pos[1], i)
wavecombine += \
"Rectify.handle_rectification(maskname, ['A', 'B'], " \
"wavenames[%i], band, waveops)" % (i)
wavecombine += "\n"
res = { "uid": getpass.getuser(),
"createdate": time.asctime(),
"maskname": plan["maskname"],
"band": plan["filter"],
"flatnames": plan["flatlist"],
"sciframes": scis,
"wavenames": waves,
"wavecombine": wavecombine,
"num_dates": num_dates}
outf.write(plan_file.format(**res))
outf.close()
def longslits():
"""List all longslits"""
if len(sys.argv) == 4:
db = load_db()
fdate = int(sys.argv[3])
query = """
select object, path, fdate, number, filter, yoffset, maskname,
gratmode, itime, el
from files
where substr(maskname,0,9) == 'LONGSLIT' and fdate = "{0}"
order by number
""".format(fdate, fdate)
cur = db.execute(query)
ress = cur.fetchall()
if len(ress) == 0:
raise Exception("No such objects in database. Query:\n%s" % query)
return
print("{0}".format(ress[0][-1]))
print("{0}".format(object))
print("{0:6s} {1:6s} {2:3s} {3:6s} {4:4s} {5:15s}".format("type", "date", "num",
"band", "offset", "object"))
objs = {}
for res in ress:
obj, path, fdate, number, filter, yoffset, maskname, gratmode, exptime, el = res
guess = '?'
if gratmode == 'imaging':
guess = "align"
elif filter == 'Dark':
guess = 'dark'
elif filter == 'Moving':
guess = 'bad'
elif len(obj) > 4 and obj[0:4] == 'Flat':
guess = 'flat'
key = "flat_{0}".format(filter)
else:
guess = "sci"
key = "{0}_{1}".format(obj,filter)
if guess == 'flat' or guess == 'sci':
            if key in objs:
objs[key].append(path)
else:
objs[key] = [path]
if res[5] is None: offset = -999
else: offset = float(res[5])
print("{0:6s} {1:6s} {2:3g} {3:6s} {4:5.1f} {5:15s}".format(guess, res[2],
res[3], res[4], offset, obj))
print("")
print("--- SUMMARY ---")
for key, value in objs.items():
print("{0:10s}: {1:5g} frames".format(key, len(value)))
else:
print("Not enough arguments")
sys.exit()
res = {
"uid": getpass.getuser(),
"createdate": time.asctime(),
'maskname': "longslit_%s" % (fdate),
'band' : 'fixme',
'flatnames': "['m%s_%%4.4i.fits' %% i for i in range(a,b)]" % (fdate),
'longnames': "['m%s_%%4.4i.fits' %% i for i in range(c,d)]" % (fdate),
'lslitoptions': "longslit = {'yrange': [709, 1350]}"
}
fout = "%s_longslit.py" % fdate
try:
f = open(fout, "w")
f.write(longslit_plan_file.format(**res))
f.close()
except:
print("Could not open and write to {0}".format(fout))
def masks():
"""List all slit masks"""
db = load_db()
if len(sys.argv) == 3:
cur = db.execute("select maskname, count(maskname) from files group by maskname")
ress = cur.fetchall()
print("{0:74s} {1:5s}".format("Mask Name", "Count"))
print("-"*80)
bold_on = False
for res in ress:
output = "{0:74s} {1:5g}".format(res[0], res[1])
            if bold_on: print(bold_ansi(output))
            else: print(output)
            bold_on = not bold_on
        print()
print('''Execute:
mospy db masks [maskname]
to generate a mask plan''')
if len(sys.argv) == 4:
maskname = sys.argv[3]
FILTERS = sql_for_mask_group_filter(db, maskname)
plans = []
for res in FILTERS:
num_frames, filter, itime, yoffset = res
if yoffset is None: yoffset='Unknown'
this_plan = {"maskname": maskname, "filter": filter}
            print()
print(boldunderline_ansi("{0:45s} {1:4s}".format(maskname, filter)))
if filter == 'Dark':
print(" Dark frames not fully supported yet")
continue
FL = sql_for_mask_filter_flats(db, maskname, filter)
print("%i flats on %i nights " % (len(FL), len(set([str(S[1]) for
S in FL]))))
this_plan["flatlist"] = [str("m%s_%4.4i.fits" % (f[1],f[2])) for f
in FL]
DATES = sql_for_mask_filter_spectra(db, maskname, filter)
this_plan["dates"] = []
for date in DATES:
date = date[0]
this_date = {"date": date}
FRAMES = sql_for_mask_filter_date(db, maskname, filter, date)
print(underline_ansi("{0}: {1} frames:".format(date,
len(FRAMES))))
nums = [int(S[2]) for S in FRAMES]
observations = find_continuous(nums)
this_date["observations"] = []
for observation in observations:
this_observation = {"observation": observation}
offsets = {}
for frame in FRAMES:
path, fdate, number, yoffset, itime = frame
if yoffset is None: yoffset = "Unknown"
if (number < observation[0]) or (number >
observation[1]):
continue
if float(yoffset) == 0: pdb.set_trace()
                        if yoffset in offsets:
offsets[yoffset]["fname"].append(
str("m%s_%4.4i.fits" % (fdate,number)))
offsets[yoffset]["itime"] += itime
else:
offsets[yoffset] = {}
offsets[yoffset]["fname"] = [
str("m%s_%4.4i.fits" %
(fdate, number))]
offsets[yoffset]["itime"] = itime
offsets[yoffset]["start/stop"] = observation
this_observation["offsets"] = offsets
this_date["observations"].append(this_observation)
for observation in this_date["observations"]:
for k,v in observation["offsets"].items():
print("\tOffset {0:5s} has {1:3g} frames ({2}-{3}) "
"total exptime is {4:5g} s".format(str(k),
len(v["fname"]), v["start/stop"][0],
v["start/stop"][1], v["itime"]))
this_plan["dates"].append(this_date)
plans.append(this_plan)
plan_to_python(plans)
commands = [make, masks, longslits]
def usage():
    print("""
Commands: """)
for command in commands:
print("\t" + command.__name__ + ": " + command.__doc__)
print("\n")
if __name__ == '__main__':
if len(sys.argv) < 3:
usage()
sys.exit()
if sys.argv[2] == 'make':
print("Making database")
make()
    elif sys.argv[2] == 'masks':
        masks()
    elif sys.argv[2] == 'longslits':
longslits()
else:
usage()
sys.exit()
| 18,595 | 27.653313 | 96 | py |
MosfireDRP | MosfireDRP-master/docs/conf.py | # -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
#
# Astropy documentation build configuration file.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this file.
#
# All configuration values have a default. Some values are defined in
# the global Astropy configuration which is loaded here before anything else.
# See astropy.sphinx.conf for which values are set there.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('..'))
# IMPORTANT: the above commented section was generated by sphinx-quickstart, but
# is *NOT* appropriate for astropy or Astropy affiliated packages. It is left
# commented out with this explanation to make it clear why this should not be
# done. If the sys.path entry above is added, when the astropy.sphinx.conf
# import occurs, it will import the *source* version of astropy instead of the
# version installed (if invoked as "make html" or directly with sphinx), or the
# version in the build directory (if "python setup.py build_sphinx" is used).
# Thus, any C-extensions that are needed to build the documentation will *not*
# be accessible, and the documentation will not build correctly.
import datetime
import os
import sys
try:
import astropy_helpers
except ImportError:
# Building from inside the docs/ directory?
if os.path.basename(os.getcwd()) == 'docs':
a_h_path = os.path.abspath(os.path.join('..', 'astropy_helpers'))
if os.path.isdir(a_h_path):
sys.path.insert(1, a_h_path)
# Load all of the global Astropy configuration
from astropy_helpers.sphinx.conf import *
# Get configuration information from setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read([os.path.join(os.path.dirname(__file__), '..', 'setup.cfg')])
setup_cfg = dict(conf.items('metadata'))
# -- General configuration ----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.2'
# To perform a Sphinx version check that needs to be more specific than
# major.minor, call `check_sphinx_version("x.y.z")` here.
# check_sphinx_version("1.2.1")
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns.append('_templates')
# This is added to the end of RST files - a good place to put substitutions to
# be used globally.
rst_epilog += """
"""
# -- Project information ------------------------------------------------------
# This does not *have* to match the package name, but typically does
project = setup_cfg['package_name']
author = setup_cfg['author']
copyright = '{0}, {1}'.format(
datetime.datetime.now().year, setup_cfg['author'])
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
__import__(setup_cfg['package_name'])
package = sys.modules[setup_cfg['package_name']]
# The short X.Y version.
version = package.__version__.split('-', 1)[0]
# The full version, including alpha/beta/rc tags.
release = package.__version__
# -- Options for HTML output ---------------------------------------------------
# A NOTE ON HTML THEMES
# The global astropy configuration uses a custom theme, 'bootstrap-astropy',
# which is installed along with astropy. A different theme can be used or
# the options for this theme can be modified by overriding some of the
# variables set in the global configuration. The variables set in the
# global configuration are listed below, commented out.
# Add any paths that contain custom themes here, relative to this directory.
# To use a different custom theme, add the directory containing the theme.
#html_theme_path = []
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes. To override the custom theme, set this to the
# name of a builtin theme or the name of a custom theme in html_theme_path.
#html_theme = None
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = ''
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = ''
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = '{0} v{1}'.format(project, release)
# Output file base name for HTML help builder.
htmlhelp_basename = project + 'doc'
# -- Options for LaTeX output --------------------------------------------------
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [('index', project + '.tex', project + u' Documentation',
author, 'manual')]
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [('index', project.lower(), project + u' Documentation',
[author], 1)]
## -- Options for the edit_on_github extension ----------------------------------------
if eval(setup_cfg.get('edit_on_github')):
extensions += ['astropy.sphinx.ext.edit_on_github']
versionmod = __import__(setup_cfg['package_name'] + '.version')
edit_on_github_project = setup_cfg['github_project']
if versionmod.version.release:
edit_on_github_branch = "v" + versionmod.version.version
else:
edit_on_github_branch = "master"
edit_on_github_source_root = ""
edit_on_github_doc_root = "docs"
| 6,191 | 38.43949 | 87 | py |
bcts | bcts-main/main.py | import argparse
from argparse import Namespace
from datetime import datetime
import random
import torch
from tqdm import tqdm
import wandb
import sys
sys.path.append("../Rainbow/")
from bcts_agent import BCTSAgent
from env import Env
from cule_env import CuleEnv
from memory import ReplayMemory
from test import test
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
parser = argparse.ArgumentParser(description='Rainbow')
parser.add_argument('--seed', type=int, default=123, help='Random seed')
parser.add_argument('--disable-cuda', type=str2bool, default=False, nargs='?',
const=True, help='Disable CUDA')
# parser.add_argument('--disable-cuda', action='store_true', help='Disable CUDA')
parser.add_argument('--game', type=str, default='breakout', help='ATARI game')
parser.add_argument('--T-max', type=int, default=int(50e6), metavar='STEPS',
help='Number of training steps (4x number of frames)')
parser.add_argument('--max-episode-length', type=int, default=int(108e3), metavar='LENGTH',
help='Max episode length (0 to disable)')
parser.add_argument('--history-length', type=int, default=4, metavar='T', help='Number of consecutive states processed')
parser.add_argument('--hidden-size', type=int, default=512, metavar='SIZE', help='Network hidden size')
parser.add_argument('--noisy-std', type=float, default=0.1, metavar='σ',
help='Initial standard deviation of noisy linear layers')
parser.add_argument('--atoms', type=int, default=51, metavar='C', help='Discretised size of value distribution')
parser.add_argument('--V-min', type=float, default=-10, metavar='V', help='Minimum of value distribution support')
parser.add_argument('--V-max', type=float, default=10, metavar='V', help='Maximum of value distribution support')
parser.add_argument('--model', type=str, metavar='PARAMS', help='Pretrained model (state dict)')
parser.add_argument('--memory-capacity', type=int, default=int(1e6), metavar='CAPACITY',
help='Experience replay memory capacity')
parser.add_argument('--replay-frequency', type=int, default=4, metavar='k', help='Frequency of sampling from memory')
parser.add_argument('--priority-exponent', type=float, default=0.5, metavar='ω',
help='Prioritised experience replay exponent (originally denoted α)')
parser.add_argument('--priority-weight', type=float, default=0.4, metavar='β',
help='Initial prioritised experience replay importance sampling weight')
parser.add_argument('--multi-step', type=int, default=3, metavar='n', help='Number of steps for multi-step return')
parser.add_argument('--discount', type=float, default=0.99, metavar='γ', help='Discount factor')
parser.add_argument('--target-update', type=int, default=int(32e3), metavar='τ',
help='Number of steps after which to update target network')
parser.add_argument('--reward-clip', type=int, default=1, metavar='VALUE', help='Reward clipping (0 to disable)')
parser.add_argument('--lr', type=float, default=0.0000625, metavar='η', help='Learning rate')
parser.add_argument('--adam-eps', type=float, default=1.5e-4, metavar='ε', help='Adam epsilon')
parser.add_argument('--batch-size', type=int, default=32, metavar='SIZE', help='Batch size')
parser.add_argument('--learn-start', type=int, default=int(80e3), metavar='STEPS',
help='Number of steps before starting training')
parser.add_argument('--evaluate', type=str2bool, default=True, nargs='?',
const=True, help='Evaluate only')
parser.add_argument('--evaluation-interval', type=int, default=100000, metavar='STEPS',
help='Number of training steps between evaluations')
parser.add_argument('--evaluation-episodes', type=int, default=150, metavar='N',
help='Number of evaluation episodes to average over')
parser.add_argument('--evaluation-size', type=int, default=500, metavar='N',
help='Number of transitions to use for validating Q')
parser.add_argument('--log-interval', type=int, default=25000, metavar='STEPS',
help='Number of training steps between logging status')
parser.add_argument('--render', type=str2bool, default=True, nargs='?',
const=True, help='Display screen (testing only)')
parser.add_argument('--tree-depth', type=int, default=0, metavar='N', help='Depth of Cule tree')
parser.add_argument('--use_cule', type=str2bool, default=True, nargs='?',
const=True, help='Choose whether to use cule')
parser.add_argument('--use_pretrained', type=str2bool, default=True, nargs='?',
const=True, help='Choose whether to load the pretrained model')
parser.add_argument('--tree_top_percentile', type=str2bool, default=False, nargs='?',
const=True, help='Choose whether to use tree_top_percentile')
parser.add_argument('--op-scale-factor', type=float, default=1.025, metavar='w',
help='Off-policy scale factor for tree correction')
parser.add_argument('--override-default-op-factor', type=str2bool, default=False, nargs='?',
const=True, help='Whether to override the default op_factor dict per game')
parser.add_argument('--multiplicative-op-factor', type=str2bool, default=True, nargs='?',
const=True, help='Choose whether to use a multiplicative or additive op_factor')
parser.add_argument('--prunning-std-thresh', type=float, default=100, metavar='w',
                    help='Best-to-second-best std threshold for tree pruning')
def rename_env(snake_env):
def to_camel_case(snake_str):
components = snake_str.split('_')
# We capitalize the first letter of each component except the first one
# with the 'title' method and join them together.
return ''.join(x.title() for x in components)
return to_camel_case(snake_env) + 'NoFrameskip-v4'
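# A couple of illustrative mappings (derived from the string logic above, not from a
# verified list of CULE/ALE environment ids):
#   rename_env('breakout')        -> 'BreakoutNoFrameskip-v4'
#   rename_env('space_invaders')  -> 'SpaceInvadersNoFrameskip-v4'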
# Setup
args = parser.parse_args()
# Hard-coded overrides: these take precedence over the CLI values parsed above
args.evaluation_interval = 35000
args.render = False
full_env_name = rename_env(args.game)
args.model = 'pretrained/{}.pth'.format(args.game) if args.use_pretrained else ''
if args.tree_depth > 0:
args.use_cule = True
print(' ' * 26 + 'Options')
for k, v in vars(args).items():
print(' ' * 26 + k + ': ' + str(v))
wandb.init(config=args, project="rainbow")
args = Namespace(**wandb.config.as_dict())
random.seed(args.seed)
torch.manual_seed(random.randint(1, 10000))
if torch.cuda.is_available() and not args.disable_cuda:
args.device = torch.device('cuda')
torch.cuda.manual_seed(random.randint(1, 10000))
torch.backends.cudnn.enabled = False # Disable nondeterministic ops (not sure if critical but better safe than sorry)
else:
args.device = torch.device('cpu')
# Simple ISO 8601 timestamped logger
def log(s):
print('[' + str(datetime.now().strftime('%Y-%m-%dT%H:%M:%S')) + '] ' + s)
# Environment
if args.use_cule:
env = CuleEnv(args, full_env_name=full_env_name)
else:
env = Env(args)
env.train()
action_space = env.action_space()
# Agent
dqn = BCTSAgent(args, env, full_env_name=full_env_name)
mem = ReplayMemory(args, args.memory_capacity)
priority_weight_increase = (1 - args.priority_weight) / (args.T_max - args.learn_start)
# Construct validation memory
val_mem = ReplayMemory(args, args.evaluation_size)
T, done = 0, True
while T < args.evaluation_size:
if done:
state, done = env.reset(), False
next_state, _, done = env.step(random.randint(0, action_space - 1))
val_mem.append(state, None, None, done)
state = next_state
T += 1
if args.evaluate:
dqn.eval() # Set DQN (online network) to evaluation mode
results = test(args, 0, dqn, val_mem, evaluate=True, env=env, env_name=full_env_name) # Test
print('Avg. reward: {} +/- {}'.format(results['avg_reward'], results['std_rew']))
print('0.25 quantile: {}, 0.75 quantile: {}'.format(results['first_quantile'], results['last_quantile']))
wandb.log(results)
else:
# Training loop
dqn.train()
done = True
# while T < args.T_max:
for T in tqdm(range(args.T_max)):
if done:
state, done = env.reset(), False
if T % args.replay_frequency == 0:
dqn.reset_noise() # Draw a new set of noisy weights
action = dqn.act(state) # Choose an action greedily (with noisy weights)
next_state, reward, done = env.step(action) # Step
if args.reward_clip > 0:
reward = max(min(reward, args.reward_clip), -args.reward_clip) # Clip rewards
mem.append(state, action, reward, done) # Append transition to memory
        T += 1  # keep the original while-loop's off-by-one so the modulo checks below fire at the same steps
if T % args.log_interval == 0:
log('T = ' + str(T) + ' / ' + str(args.T_max))
# Train and test
if T >= args.learn_start:
mem.priority_weight = min(mem.priority_weight + priority_weight_increase,
1) # Anneal importance sampling weight β to 1
if T % args.replay_frequency == 0:
dqn.learn(mem) # Train with n-step distributional double-Q learning
if T % args.evaluation_interval == 0:
dqn.eval() # Set DQN (online network) to evaluation mode
results = test(args, T, dqn, val_mem, env=env, env_name=full_env_name) # Test
log('T = ' + str(T) + ' / ' + str(args.T_max) + ' | Avg. reward: ' + str(
results['avg_reward']) + ' | Avg. Q: ' + str(results['avg_Q']))
wandb.log({'episodic_reward': results['avg_reward'], 'average Q': results['avg_Q']}, step=T)
dqn.train() # Set DQN (online network) back to training mode
# Update target network
if T % args.target_update == 0:
dqn.update_target_net()
state = next_state
env.close()
| 10,120 | 46.294393 | 122 | py |
bcts | bcts-main/cule_env.py | import torch
import cv2 # Note that importing cv2 before torch may cause segfaults?
from env import Env
from torchcule.atari import Env as AtariEnv
from torchcule.atari import Rom as AtariRom
class CuleEnv(Env):
def __init__(self, args, full_env_name):
super(CuleEnv, self).__init__(args)
env_name = full_env_name
self.device = args.device
cart = AtariRom(env_name)
actions = cart.minimal_actions()
self.env = AtariEnv(env_name, num_envs=1, color_mode='gray', repeat_prob=0.0, device=torch.device("cpu"),
rescale=True, episodic_life=False, frameskip=4, action_set=actions)
super(AtariEnv, self.env).reset(0)
self.env.reset(initial_steps=1, verbose=1)
def _reset_buffer(self):
for _ in range(self.window):
self.state_buffer.append(torch.zeros(84, 84, device=self.device))
def reset(self):
obs = torch.zeros(84, 84, device=self.device)
if self.life_termination:
self.life_termination = False # Reset flag
self.env.step([0]) # Use a no-op after loss of life
else:
# Reset internals
self._reset_buffer()
# Perform up to 30 random no-ops before starting
obs = self.env.reset(initial_steps=1, verbose=1)
obs = obs[0, :, :, 0].to(self.device)
obs = obs / 255
self.last_frame = obs
self.state_buffer.append(obs)
self.lives = self.env.lives
return torch.stack(list(self.state_buffer), 0)
def step(self, action):
# Repeat action 4 times, max pool over last 2 frames
obs, reward, done, info = self.env.step(torch.tensor([action]))
if self.lives is None:
self.lives = self.env.lives.item()
obs = obs[0, :, :, 0].to(self.device) / 255
self.state_buffer.append(obs)
self.last_frame = obs
# Detect loss of life as terminal in training mode
lives = info['ale.lives'][0]
if self.training:
if lives < self.lives and lives > 0: # Lives > 0 for Q*bert
self.life_termination = not done # Only set flag when not truly done
done = True
self.lives = lives
# Return state, reward, done
return torch.stack(list(self.state_buffer), 0), reward, done
def render(self):
cv2.imshow('screen', self.last_frame)
cv2.waitKey(1)
| 2,453 | 37.952381 | 113 | py |
bcts | bcts-main/test.py | import os
import plotly
from plotly.graph_objs import Scatter
from plotly.graph_objs.scatter import Line
import torch
from cule_env import CuleEnv
from tqdm import tqdm
import numpy as np
import time
from scipy.stats import mstats
import wandb
from copy import deepcopy
FIRE_LIST = ['Breakout', 'Beam']
# Globals
Ts, rewards, Qs, best_avg_reward = [], [], [], -1e10
def extract_stats(rew_vector):
rew_vector_np = np.asarray(rew_vector)
avg_reward = rew_vector_np.mean()
    # Both std fields are computed from the reward vector; a separate Q-value std is not tracked here
    std_rew = std_q = rew_vector_np.std()
max_rew, med_rew = rew_vector_np.max(), np.median(rew_vector_np)
quantiles = mstats.mquantiles(rew_vector_np, axis=0)
results = {}
results['avg_reward'] = avg_reward
results['std_rew'] = std_rew
results['std_q'] = std_q
results['max_rew'] = max_rew
results['med_rew'] = med_rew
results['first_quantile'] = quantiles[0]
results['last_quantile'] = quantiles[-1]
return results
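# Minimal worked example (values computed by hand; the quantiles follow scipy's
# mquantiles defaults, so treat the 12/28 figures as approximate):
#   extract_stats([10, 20, 30])
#   -> avg_reward = 20.0, std_rew = std_q ~= 8.16 (population std),
#      max_rew = 30, med_rew = 20.0,
#      first_quantile ~= 12.0, last_quantile ~= 28.0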
# Test DQN
def test(args, T, dqn, val_mem, evaluate=False, env=None, env_name=None):
global Ts, rewards, Qs, best_avg_reward
if env is None:
# env = Env(args)
        env = CuleEnv(args, full_env_name=env_name)  # CuleEnv requires the full environment name
env.eval()
Ts.append(T)
T_rewards, T_Qs = [], []
fire_env = len([e for e in FIRE_LIST if e in env_name]) > 0
# Test performance over several episodes
done = True
time_log = []
mean_depth_vec = []
for i_ep in tqdm(range(args.evaluation_episodes)):
c = 0
while True:
c += 1
if done:
state, reward_sum, done, fire_reset = env.reset(), 0, False, fire_env
lives = env.lives.clone()
# if c % 5 == 0:
# import matplotlib.pyplot as plt
# plt.imshow(state[-1].cpu())
# plt.show()
max_cut_time = [0]
start_time = time.time()
discard_timing = [fire_reset]
action = 1 if fire_reset else dqn.act_e_greedy(state, discard_timing=discard_timing,
max_cut_time=max_cut_time) # Choose an action ε-greedily
end_time = time.time()
if not discard_timing[0]:
time_log.append(end_time - start_time - max_cut_time[0])
# print('avg step time: {}, std step time: {}'.format(np.asarray(time_log).mean(), np.asarray(time_log).std()))
state, reward, done = env.step(action) # Step
reward_sum += reward
# Check for lost life
new_lives = env.lives.clone()
fire_reset = new_lives < lives and fire_env
lives.copy_(new_lives)
if args.render:
env.render()
if done:
T_rewards.append(reward_sum)
print('trunc count: {}/{}'.format(dqn.cule_bfs.trunc_count, c))
print('ep rew: {}'.format(reward_sum))
results_rolling = deepcopy(extract_stats(T_rewards))
for key in list(results_rolling.keys()):
results_rolling[key + '_rolling'] = results_rolling.pop(key)
results_rolling['step_time_avg'], results_rolling['step_time_std'] = np.asarray(time_log).mean(), np.asarray(time_log).std()
results_rolling['ep_rew'] = reward_sum
ep_mean_depth = np.mean(dqn.cule_bfs.mean_depth_vec)
                results_rolling['mean_depth'] = ep_mean_depth
                dqn.cule_bfs.reset_mean_depth_vec()
mean_depth_vec.append(ep_mean_depth)
wandb.log(results_rolling, step=i_ep)
dqn.cule_bfs.trunc_count = 0
break
env.close()
# Test Q-values over validation memory
for state in val_mem: # Iterate over valid states
T_Qs.append(dqn.evaluate_q(state))
results = extract_stats(T_rewards)
results['mean_depth_final'] = np.mean(mean_depth_vec)
if not evaluate:
# Append to results
rewards.append(T_rewards)
Qs.append(T_Qs)
# Plot
_plot_line(Ts, rewards, 'Reward', path='results')
_plot_line(Ts, Qs, 'Q', path='results')
# Save model parameters if improved
if results['avg_reward'] > best_avg_reward:
best_avg_reward = results['avg_reward']
dqn.save('results')
# Return average reward and Q-value
return results
# Plots min, max and mean + standard deviation bars of a population over time
def _plot_line(xs, ys_population, title, path=''):
max_colour, mean_colour, std_colour, transparent = 'rgb(0, 132, 180)', 'rgb(0, 172, 237)', 'rgba(29, 202, 255, 0.2)', 'rgba(0, 0, 0, 0)'
ys = torch.tensor(ys_population, dtype=torch.float32)
ys_min, ys_max, ys_mean, ys_std = ys.min(1)[0].squeeze(), ys.max(1)[0].squeeze(), ys.mean(1).squeeze(), ys.std(1).squeeze()
ys_upper, ys_lower = ys_mean + ys_std, ys_mean - ys_std
trace_max = Scatter(x=xs, y=ys_max.numpy(), line=Line(color=max_colour, dash='dash'), name='Max')
trace_upper = Scatter(x=xs, y=ys_upper.numpy(), line=Line(color=transparent), name='+1 Std. Dev.', showlegend=False)
trace_mean = Scatter(x=xs, y=ys_mean.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=mean_colour), name='Mean')
trace_lower = Scatter(x=xs, y=ys_lower.numpy(), fill='tonexty', fillcolor=std_colour, line=Line(color=transparent), name='-1 Std. Dev.', showlegend=False)
trace_min = Scatter(x=xs, y=ys_min.numpy(), line=Line(color=max_colour, dash='dash'), name='Min')
plotly.offline.plot({
'data': [trace_upper, trace_mean, trace_lower, trace_min, trace_max],
'layout': dict(title=title, xaxis={'title': 'Step'}, yaxis={'title': title})
}, filename=os.path.join(path, title + '.html'), auto_open=False)
| 5,371 | 36.566434 | 156 | py |
bcts | bcts-main/cule_bfs.py | import torch
import numpy as np
from torchcule.atari import Env as AtariEnv
from torchcule.atari import Rom as AtariRom
import time
RAND_FIRE_LIST = ['Breakout']
CROSSOVER_DICT = {'MsPacman': 1, 'Breakout': 2, 'Assault': 2, 'Krull': 2, 'Pong': 1, 'Boxing': 1, 'Asteroids': 1}
OP_FACTOR_DICT = {'Video': [1.1] * 4, 'Space': [1.15] * 4, 'Breakout': [1.125, 1.2, 1.125, 1.075],
'Asteroids': [1.0] * 4,
'Frostbite': [1.1] * 4, 'Beam': [1.15, 1.025, 1.025]}
class CuleBFS():
def __init__(self, env_name, tree_depth, gamma=0.99, verbose=False, ale_start_steps=1,
ignore_value_function=False, perturb_reward=True, step_env=None, args=None): # value_std_thresh=0.035
self.crossover_level = 1
for k, v in CROSSOVER_DICT.items():
if k in env_name:
self.crossover_level = v
break
self.op_scale_factor = args.op_scale_factor
self.op_scale_factor_per_depth = [self.op_scale_factor] * 4
if not args.override_default_op_factor:
for k, v in OP_FACTOR_DICT.items():
if k in env_name:
self.op_scale_factor = v[tree_depth - 1]
self.op_scale_factor_per_depth = v
break
self.prunning_std_thresh = args.prunning_std_thresh
self.args = args
self.verbose = verbose
self.print_times = False
self.ale_start_steps = ale_start_steps
self.gamma = gamma
self.max_depth = tree_depth
self.env_name = env_name
self.ignore_value_function = ignore_value_function
self.perturb_reward = perturb_reward
self.tree_top_percentile = args.tree_top_percentile
cart = AtariRom(env_name)
self.min_actions = cart.minimal_actions()
self.min_actions_size = len(self.min_actions)
num_envs = self.min_actions_size ** tree_depth
self.gpu_env = self.get_env(num_envs, device=torch.device("cuda", 0))
if self.crossover_level == -1:
self.cpu_env = self.gpu_env
else:
self.cpu_env = self.get_env(num_envs, device=torch.device("cpu"))
self.step_env = step_env
self.num_leaves = num_envs
self.gpu_actions = self.gpu_env.action_set
self.cpu_actions = self.gpu_actions.to(self.cpu_env.device)
self.device = self.gpu_env.device
self.envs = [self.gpu_env]
self.num_envs = 1
self.trunc_count = 0
        self.use_max_diff_cut = self.prunning_std_thresh < 100  # pruning is only active below the sentinel threshold of 100
        # We don't collect statistics for the last depth since it's meaningless (we won't prune there anyhow)
self.diff_max_2ndmax = np.zeros((max(self.max_depth - 1, 0), 10000))
        self.diff_max_2ndmax_idx = np.zeros(max(self.max_depth - 1, 0), dtype=int)  # np.int alias was removed in modern NumPy
self.diff_max_2ndmax_1 = np.zeros(max(self.max_depth - 1, 0))
self.diff_max_2ndmax_2 = np.zeros(max(self.max_depth - 1, 0))
self.mean_depth_vec = []
def reset_mean_depth_vec(self):
self.mean_depth_vec = []
def get_env(self, num_envs, device):
env = AtariEnv(self.env_name, num_envs, color_mode='gray', repeat_prob=0.0, device=device, rescale=True,
episodic_life=True, frameskip=4, action_set=self.min_actions)
super(AtariEnv, env).reset(0)
initial_steps_rand = 1
env.reset(initial_steps=initial_steps_rand, verbose=self.verbose)
# env.train()
return env
def bfs(self, state, q_net, support, args, fire_pressed=[False], max_cut_time=[0]):
state_clone = state.clone().detach()
max_depth = args.tree_depth
fire_env = len([e for e in RAND_FIRE_LIST if e in self.env_name]) > 0
if fire_env and np.random.rand() < 0.01:
# make sure 'FIRE' is pressed often enough to launch ball after life loss
# return torch.tensor([1], device=self.device), torch.tensor(0, device=self.device)
fire_pressed[0] = True
return 1
print_times = self.print_times
cpu_env = self.cpu_env
gpu_env = self.gpu_env
step_env = self.step_env
# Set device environment root state before calling step function
cpu_env.states[0] = step_env.states[0]
cpu_env.ram[0] = step_env.ram[0]
cpu_env.frame_states[0] = step_env.frame_states[0]
# Zero out all buffers before calling any environment functions
cpu_env.rewards.zero_()
cpu_env.observations1.zero_()
cpu_env.observations2.zero_()
cpu_env.done.zero_()
# Make sure all actions in the backend are completed
# Be careful making calls to pytorch functions between cule synchronization calls
if gpu_env.is_cuda:
gpu_env.sync_other_stream()
if print_times:
total_start = torch.cuda.Event(enable_timing=True)
total_end = torch.cuda.Event(enable_timing=True)
total_start.record()
# Create a default depth_env pointing to the CPU backend
depth_env = cpu_env
depth_actions_initial = self.cpu_actions
num_envs = 1
relevant_env = depth_env if max_depth > 0 else step_env
for depth in range(max_depth):
if print_times:
depth_start = torch.cuda.Event(enable_timing=True)
depth_end = torch.cuda.Event(enable_timing=True)
depth_start.record()
# By level 3 there should be enough states to warrant moving to the GPU.
# We do this by copying all of the relevant state information between the
# backend GPU and CPU instances.
if depth == self.crossover_level:
self.copy_envs(cpu_env, gpu_env)
depth_env = gpu_env
relevant_env = depth_env if max_depth > 0 else step_env
depth_actions_initial = self.gpu_actions
# Compute the number of environments at the current depth
num_envs = self.min_actions_size ** (depth + 1)
# depth_env.set_size(num_envs)
depth_env.expand(num_envs)
depth_actions = depth_actions_initial.repeat(self.min_actions_size ** depth)
# Loop over the number of frameskips
for frame in range(depth_env.frameskip):
# Execute backend call to the C++ step function with environment data
super(AtariEnv, depth_env).step(depth_env.fire_reset and depth_env.is_training, False,
depth_actions.data_ptr(), 0, depth_env.done.data_ptr(), 0)
# Update the reward, done, and lives flags
depth_env.get_data(depth_env.episodic_life, self.gamma ** depth, depth_env.done.data_ptr(),
depth_env.rewards.data_ptr(), depth_env.lives.data_ptr())
# To properly compute the output observations we need the last frame AND the second to last frame.
# On the second to last step we need to update the frame buffers
if not self.ignore_value_function:
if frame == (depth_env.frameskip - 2):
depth_env.generate_frames(depth_env.rescale, False, depth_env.num_channels,
depth_env.observations2[:num_envs].data_ptr())
if frame == (depth_env.frameskip - 1):
depth_env.generate_frames(depth_env.rescale, False, depth_env.num_channels,
depth_env.observations1[:num_envs].data_ptr())
new_obs = torch.max(depth_env.observations1[:num_envs], depth_env.observations2[:num_envs])
new_obs = new_obs / 255
# import matplotlib.pyplot as plt
# for i in range(4):
# plt.imshow(state_clone[i][0].cpu())
# plt.show()
new_obs = new_obs.squeeze(dim=-1).unsqueeze(dim=1).to(self.device)
state_clone = self.replicate_state(state_clone)
state_clone = torch.cat((state_clone[:, 1: args.history_length, :, :], new_obs), dim=1)
# obs = obs[:num_envs].to(gpu_env.device).permute((0, 3, 1, 2))
if print_times:
depth_end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
if print_times:
depth_runtime = depth_start.elapsed_time(depth_end)
print('Level {} with {} environments: {:4.4f} (ms)'.format(depth, num_envs, depth_runtime))
if depth < max_depth - 1:
cut_time_start = time.time()
max_cut_condition = self.use_max_diff_cut and \
self.compute_max_cut_condition(gpu_env, num_envs, state_clone, q_net,
depth=depth, env=relevant_env, support=support)
cut_time_end = time.time()
max_cut_time[0] += cut_time_end - cut_time_start
if max_cut_condition:
max_depth = depth + 1
torch.cuda.synchronize()
break
# relevant_env = depth_env if max_depth > 0 else step_env
self.mean_depth_vec.append(max_depth)
# positive_reward = num_envs > relevant_env.rewards[:num_envs].to(gpu_env.device).sum() > 0 or True
d0_values = self.compute_value(state, q_net, support)
d0_act = d0_values.argmax(1).item()
if max_depth == 0: # or not positive_reward:
return d0_act
# Make sure all actions in the backend are completed
if depth_env.is_cuda:
depth_env.sync_this_stream()
torch.cuda.current_stream().synchronize()
# Form observations using max of last 2 frame_buffers
# torch.max(depth_env.observations1[:num_envs], depth_env.observations2[:num_envs], out=depth_env.observations1[:num_envs])
if print_times:
total_end.record()
# Waits for everything to finish running
torch.cuda.synchronize()
if print_times:
total_runtime = total_start.elapsed_time(total_end)
print('Total expansion time: {:4.4f} (ms)'.format(total_runtime))
value_start = torch.cuda.Event(enable_timing=True)
value_end = torch.cuda.Event(enable_timing=True)
value_start.record()
rewards = self.compute_rewards(gpu_env, num_envs, state_clone, q_net, depth=max_depth, env=relevant_env,
support=support)
def perturb(rew):
p_rew = rew
if self.perturb_reward:
p_rew += torch.normal(mean=torch.zeros_like(rew), std=1e-5)
return p_rew
rewards = perturb(rewards)
best_value = rewards.max()
size_subtree = self.min_actions_size ** (max_depth - 1)
d0_act_idx = slice(size_subtree * d0_act, size_subtree * (d0_act + 1))
if self.args.multiplicative_op_factor:
rewards[d0_act_idx] *= self.op_scale_factor_per_depth[max_depth - 1]
else:
rewards[d0_act_idx] += (self.op_scale_factor_per_depth[max_depth - 1] - 1) * rewards.mean()
# delta_half = abs(d0_values - rewards_depth1)[0] / 2
# sigma_o = delta_half[d0_act]
# sigma_e = (delta_half.sum() - sigma_o) / (len(delta_half) - 1)
# bonus = self.gamma ** max_depth * np.sqrt(2 * np.log(self.min_actions_size)) * (sigma_e * np.sqrt(max_depth) - sigma_o * np.sqrt(max_depth - 1))
# rewards[d0_act_idx] += bonus
if self.tree_top_percentile and max_depth > 0:
top_prec_num = round(self.min_actions_size / 3) # round(len(rewards) / self.min_actions_size ** 2 + 1)
top_percentile = torch.tensor([torch.sort(e, descending=True)[0][:top_prec_num].mean() for e in
torch.split(rewards, self.min_actions_size)])
top_percentile = perturb(top_percentile)
best_value = top_percentile.max()
best_action = top_percentile.argmax() // depth_env.action_space.n ** (max_depth - 2)
else:
best_action = rewards.argmax() // depth_env.action_space.n ** (max_depth - 1)
self.trunc_count += int(best_action != d0_act)
cpu_env.set_size(1)
gpu_env.set_size(1)
        if False and print_times:  # timing of the value computation is disabled for now: recording these events caused a crash
torch.cuda.synchronize()
value_end.record()
value_runtime = value_start.elapsed_time(value_end)
print('Total value computation time: {:4.4f} (ms)'.format(value_runtime))
return best_action.unsqueeze(-1)
def replicate_state(self, state):
if len(state.shape) == 3:
state = state.unsqueeze(dim=0)
tmp = state.reshape(state.shape[0], -1)
tmp = tmp.repeat(1, self.min_actions_size).view(-1, tmp.shape[1])
return tmp.reshape(tmp.shape[0], *state.shape[1:])
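    # Shape sketch for replicate_state (assuming the usual 4x84x84 Atari frame stack):
    # a single state (4, 84, 84) is first lifted to (1, 4, 84, 84); then each of the B
    # input rows is duplicated min_actions_size times *consecutively*, giving
    # (B * min_actions_size, 4, 84, 84). Children of tree node i therefore occupy the
    # contiguous slice [i * min_actions_size, (i + 1) * min_actions_size).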
def compute_rewards(self, gpu_env, num_envs, obs, q_net, depth, env, support):
rewards = env.rewards[:num_envs].to(gpu_env.device)
done = env.done[:num_envs].to(gpu_env.device)
if (~done).any() and not self.ignore_value_function:
not_done_value = self.compute_value(obs[~done, :], q_net, support).max(1)[0]
rewards[~done] += (self.gamma ** depth) * not_done_value
return rewards
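    # In effect each leaf is scored with an n-step-style backup (notation is mine, not
    # from the original code):
    #   score(leaf) = sum_{t < depth} gamma^t * r_t + gamma^depth * max_a Q(s_leaf, a)
    # where the discounted reward sum is what env.rewards already accumulates via the
    # gamma**depth factor passed to get_data, and the bootstrap term is skipped for
    # leaves that reached a terminal state.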
def copy_envs(self, source_env, target_env):
print_times = self.print_times
if print_times:
copy_start = torch.cuda.Event(enable_timing=True)
copy_end = torch.cuda.Event(enable_timing=True)
target_env.set_size(source_env.size())
if print_times:
copy_start.record()
target_env.states.copy_(source_env.states)
target_env.ram.copy_(source_env.ram)
target_env.rewards.copy_(source_env.rewards)
target_env.done.copy_(source_env.done)
target_env.frame_states.copy_(source_env.frame_states)
if print_times:
copy_end.record()
torch.cuda.synchronize()
target_env.update_frame_states()
if print_times:
depth_copytime = copy_start.elapsed_time(copy_end)
print('Depth copy time: {:4.4f} (ms)'.format(depth_copytime))
def compute_value(self, state, q_net, support):
with torch.no_grad():
if len(state.shape) == 3:
state = state.unsqueeze(dim=0)
# return (q_net(state.unsqueeze(0)) * support).sum(2).argmax(1).item()
            # Leftover debugging visualization: 2744 = 14**3 leaves (a 14-action game expanded to depth 3);
            # it plots the per-action return distributions when their stds diverge noticeably.
            if state.shape[0] == 2744:
stds = [
((q_net(state)[0, j, :] * support ** 2).sum() - (q_net(state)[0, j, :] * support).sum() ** 2).item()
for j in range(14)]
if max(stds) - min(stds) > 0.1:
import matplotlib.pyplot as plt
import numpy as np
plt.plot(support.cpu(), q_net(state)[0, np.argmax(stds), :].cpu())
plt.plot(support.cpu(), q_net(state)[0, np.argmin(stds), :].cpu())
plt.show()
fig = plt.figure()
for i in range(14):
plt.plot(support.cpu(), q_net(state)[0, i, :].cpu())
plt.show()
return (q_net(state) * support).sum(2)
def compute_max_cut_condition(self, gpu_env, num_envs, state_clone, q_net, depth, env, support):
rewards = self.compute_rewards(gpu_env, num_envs, state_clone, q_net, depth=depth + 1, env=env, support=support)
max_val_per_subtree = torch.tensor(
[e.max() for e in torch.split(rewards, int(len(rewards) / self.min_actions_size))])
max_val = max_val_per_subtree.max()
cur_idx = self.diff_max_2ndmax_idx[depth] % len(self.diff_max_2ndmax[depth])
nsamples = min(self.diff_max_2ndmax_idx[depth] + 1, len(self.diff_max_2ndmax[depth]))
# first clean up previous value (if a full cycle or more finished)
self.diff_max_2ndmax_2[depth] -= self.diff_max_2ndmax[depth][cur_idx] ** 2
self.diff_max_2ndmax_1[depth] -= self.diff_max_2ndmax[depth][cur_idx]
if (max_val_per_subtree == max_val).all():
new_diff_max = 0.0
else:
large_const = 2 * max_val
max_val_per_subtree[max_val_per_subtree == max_val] = -large_const
new_diff_max = float(max_val - max_val_per_subtree.max())
max_val_per_subtree[max_val_per_subtree == -large_const] = max_val
self.diff_max_2ndmax_2[depth] += new_diff_max ** 2
self.diff_max_2ndmax_1[depth] += new_diff_max
if nsamples > 1:
med_diff = np.median(self.diff_max_2ndmax[depth][:nsamples])
max_cut_condition = new_diff_max > self.prunning_std_thresh * med_diff
else:
max_cut_condition = False
self.diff_max_2ndmax[depth][cur_idx] = new_diff_max
self.diff_max_2ndmax_idx[depth] += 1
return max_cut_condition
| 17,191 | 48.260745 | 158 | py |
bcts | bcts-main/bcts_agent.py | import random
import torch
from agent import Agent
from cule_bfs import CuleBFS
import os
class BCTSAgent(Agent):
def __init__(self, args, env, full_env_name):
super(BCTSAgent, self).__init__(args, env)
self.args = args
self.full_env_name = full_env_name
if args.use_cule:
self.cule_bfs = CuleBFS(env_name=full_env_name, tree_depth=args.tree_depth, verbose=False,
ale_start_steps=1,
ignore_value_function=False, perturb_reward=True, step_env=env.env, args=args)
        if args.model:
            if os.path.isfile(args.model):
                # Loading happens in the parent class; here we only alert
                print('Loaded pretrained agent from: {}'.format(args.model))
            else:
                raise FileNotFoundError('Pretrained agent not found: {}'.format(args.model))
# Acts based on single state (no batch)
def act(self, state, fire_pressed=[False], max_cut_time=[0]):
if self.args.use_cule:
return self.cule_bfs.bfs(state, self.online_net, self.support, self.args, fire_pressed=fire_pressed,
max_cut_time=max_cut_time)
else:
with torch.no_grad():
return (self.online_net(state.unsqueeze(0)) * self.support).sum(2).argmax(1).item()
# Acts with an ε-greedy policy (used for evaluation only)
def act_e_greedy(self, state, epsilon=0.001, discard_timing=[False], max_cut_time=[0]):
fire_pressed = [False]
# High ε can reduce evaluation scores drastically
if random.random() < epsilon:
exploring = True
action = random.randrange(self.action_space)
else:
exploring = False
action = self.act(state, fire_pressed=fire_pressed, max_cut_time=max_cut_time)
discard_timing[0] = fire_pressed[0] or exploring
return action
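    # Usage sketch (mirroring test.py): single-element lists act as mutable
    # out-parameters, an existing convention of this code base rather than something
    # introduced here.
    #   discard_timing, max_cut_time = [False], [0.0]
    #   action = dqn.act_e_greedy(state, discard_timing=discard_timing,
    #                             max_cut_time=max_cut_time)
    #   # discard_timing[0] is True when the step was random exploration or a forced
    #   # FIRE press, so its wall-clock time should not be counted;
    #   # max_cut_time[0] accumulates the time spent on the pruning test inside bfs().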
| 1,933 | 42.954545 | 114 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/base.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Base classes for the implementation of a generic syntax tree."""
import functools
import re
from abc import ABC, abstractmethod
from enum import Enum
from typing import Any, Generic, List, Optional, Sequence, Tuple, TypeVar, Union, cast
from ltlf2dfa.helpers import Hashable, Wrapper, check_
from ltlf2dfa.symbols import OpSymbol, Symbols
AtomSymbol = Union["QuotedFormula", str]
class Logic(Enum):
"""Logic classes."""
LTLf = "ltlf"
PPLTL = "ppltl"
class Formula(Hashable, ABC):
"""Abstract class for a formula."""
@abstractmethod
def find_labels(self) -> List[AtomSymbol]:
"""Return the list of symbols."""
def to_nnf(self) -> "Formula":
"""Transform the formula in NNF."""
return self
@abstractmethod
def negate(self) -> "Formula":
"""Negate the formula. Used by 'to_nnf'."""
@abstractmethod
def to_mona(self, v: Optional[Any] = None) -> str:
"""Transform the formula in MONA."""
class AtomicFormula(Formula, ABC):
"""An abstract atomic formula.
Both formulas and names can be used as atomic symbols.
A name must be a string made of letters, numbers, underscores, or it must
be a quoted string.
"""
name_regex = re.compile(r'(\w+)|(".*")')
def __init__(self, s: Union[AtomSymbol, Formula]):
"""Inintializes the atomic formula.
:param s: the atomic symbol. Formulas are implicitly converted to
quoted formulas.
"""
super().__init__()
# If formula
if isinstance(s, Formula):
self.s = QuotedFormula(s) # type: AtomSymbol
# If name
else:
self.s = str(s)
if not self.name_regex.fullmatch(self.s):
raise ValueError(
"The symbol name does not respect the naming convention."
)
def _members(self):
return self.s
def __str__(self):
"""Get the string representation."""
return str(self.s)
def find_labels(self) -> List[AtomSymbol]:
"""Return the list of symbols."""
return [self.s]
class QuotedFormula(Wrapper):
"""This object is a constant representation of a formula.
This can be used as a normal formula. Quoted formulas can also be used as
hashable objects and for atomic symbols.
"""
def __init__(self, f: Formula):
"""Initialize.
:param f: formula to represent.
"""
super().__init__(f)
self.__dict__["_QuotedFormula__str"] = '"' + str(f) + '"'
def __str__(self):
"""Cache str."""
return self.__str
def __repr__(self):
"""Nice representation."""
return str(self)
class MonaProgram:
"""Implements a MONA program."""
HEADER = "var2 $ where ~ex1 p where true: p notin $ & p+1 in $;\nallpos $"
vars: List[str] = []
def __init__(self, f: Formula):
"""Initialize.
:param f: formula to encode.
"""
self.formula = f
self._vars()
def _vars(self):
"""List MONA vars."""
self.vars = [v.upper() for v in self.formula.find_labels()]
def __repr__(self):
"""Nice representation."""
return str(self)
def mona_program(self) -> str:
"""Construct the MONA program."""
if self.vars:
return f"#{str(self.formula)};\n{self.HEADER};\nvar2 {', '.join(self.vars)};\n{self.formula.to_mona()};\n"
return f"#{str(self.formula)};\n{self.HEADER};\n{self.formula.to_mona()};\n"
class Operator(Formula, ABC):
"""Implements an operator."""
base_expression = (
Symbols.ROUND_BRACKET_LEFT.value + "%s" + Symbols.ROUND_BRACKET_RIGHT.value
)
@property
@abstractmethod
def operator_symbol(self) -> OpSymbol:
"""Get the symbol of the operator."""
T = TypeVar("T")
OperatorChildren = Sequence[T]
class UnaryOperator(Generic[T], Operator, ABC):
"""A class to represent unary operator."""
def __init__(self, f: T):
"""
Instantiate the unary operator over a formula.
:param f: the formula on which the operator is applied.
"""
super().__init__()
self.f = f
def __str__(self):
"""Get the string representation."""
return (
str(self.operator_symbol)
+ Symbols.ROUND_BRACKET_LEFT.value
+ str(self.f)
+ Symbols.ROUND_BRACKET_RIGHT.value
)
def _members(self):
return self.operator_symbol, self.f
def __lt__(self, other):
"""Compare the formula with another formula."""
return self.f.__lt__(other.f)
def find_labels(self) -> List[AtomSymbol]:
"""Return the list of symbols."""
return cast(Formula, self.f).find_labels()
class BinaryOperator(Generic[T], Operator, ABC):
"""A generic binary formula."""
def __init__(self, formulas: OperatorChildren):
"""
Initialize the binary operator.
:param formulas: the children formulas of the operator.
"""
super().__init__()
check_(len(formulas) >= 2)
self.formulas = tuple(formulas) # type: OperatorChildren
def __str__(self):
"""Return the string representation."""
return (
"("
+ (" " + str(self.operator_symbol) + " ").join(map(str, self.formulas))
+ ")"
)
def _members(self) -> Tuple[OpSymbol, OperatorChildren]:
return self.operator_symbol, self.formulas
def find_labels(self) -> List[AtomSymbol]:
"""Return the list of symbols."""
# return set.union(*map(lambda f: f.find_labels(), self.formulas))))
# seen = set()
# result = []
# for f in self.formulas:
# for lab in f.find_labels():
# if lab not in seen:
# result.append(lab)
# seen.add(lab)
# return result
return flatten(self.formulas)
def to_nnf(self):
"""Transform in NNF."""
return type(self)([f.to_nnf() for f in self.formulas])
@functools.singledispatch
def flatten(lst) -> List:
"""Flatten a list of lists."""
return [item for sublist in lst for item in sublist.find_labels()]
def _flatten_as_set(lst):
"""Flatten a list of lists removing duplicates."""
seen = set()
result = []
for f in lst:
for lab in f.find_labels():
if lab not in seen:
result.append(lab)
seen.add(lab)
return result
@flatten.register(tuple)
def _(lst: Tuple[Formula]):
"""Flatten a list of lists of formulas."""
return _flatten_as_set(lst)
| 7,465 | 26.651852 | 118 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/ltlf2dfa.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Main module of the pakage."""
import itertools as it
import os
import re
import signal
from subprocess import PIPE, Popen, TimeoutExpired # nosec B404
from sympy import And, Not, Or, simplify, symbols
from ltlf2dfa.base import MonaProgram
from ltlf2dfa.helpers import check_
PACKAGE_DIR = os.path.dirname(os.path.abspath(__file__))
UNSAT_DOT = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle];
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="true"];
}"""
def get_value(text, regex, value_type=float):
"""Dump a value from a file based on a regex passed in."""
pattern = re.compile(regex, re.MULTILINE)
results = pattern.search(text)
if results:
return value_type(results.group(1))
print(f"Could not find the value {regex}, in the text provided")
return value_type(0.0)
def ter2symb(ap, ternary):
"""Translate ternary output to symbolic."""
expr = And()
i = 0
for value in ternary:
if value == "1":
expr = And(expr, ap[i] if isinstance(ap, tuple) else ap)
elif value == "0":
check_(value == "0")
expr = And(expr, Not(ap[i] if isinstance(ap, tuple) else ap))
else:
check_(value == "X", "[ERROR]: the guard is not X")
i += 1
return expr
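# Worked example (a sketch; `symbols` is sympy.symbols as imported above):
#   a, b, c = symbols("a b c")
#   ter2symb((a, b, c), "1X0")  ->  a & ~c
# '1' keeps the i-th atom, '0' negates it, and 'X' (don't care) skips it while the
# index still advances.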
def simplify_guard(guards):
"""Make a big OR among guards and simplify them."""
return simplify(Or(*guards))
def parse_mona(mona_output):
"""Parse mona output and construct a dot."""
free_variables = get_value(
mona_output, r".*DFA for formula with free variables:[\s]*(.*?)\n.*", str
)
if "state" in free_variables:
free_variables = None
else:
free_variables = symbols(
" ".join(
x.strip().lower() for x in free_variables.split() if len(x.strip()) > 0
)
)
# initial_state = get_value(mona_output, '.*Initial state:[\s]*(\d+)\n.*', int)
accepting_states = get_value(mona_output, r".*Accepting states:[\s]*(.*?)\n.*", str)
accepting_states = [
str(x.strip()) for x in accepting_states.split() if len(x.strip()) > 0
]
# num_states = get_value(mona_output, '.*Automaton has[\s]*(\d+)[\s]states.*', int) - 1
dot = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];\n"""
dot += f" node [shape = doublecircle]; {'; '.join(accepting_states)};\n"
dot += """ node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;\n"""
dot_trans = {} # maps each couple (src, dst) to a list of guards
for line in mona_output.splitlines():
if line.startswith("State "):
orig_state = get_value(line, r".*State[\s]*(\d+):\s.*", int)
guard = get_value(line, r".*:[\s](.*?)[\s]->.*", str)
if free_variables:
guard = ter2symb(free_variables, guard)
else:
guard = ter2symb(free_variables, "X")
dest_state = get_value(line, r".*state[\s]*(\d+)[\s]*.*", int)
if orig_state:
if (orig_state, dest_state) in dot_trans:
dot_trans[(orig_state, dest_state)].append(guard)
else:
dot_trans[(orig_state, dest_state)] = [guard]
for c, guards in dot_trans.items():
simplified_guard = simplify_guard(guards)
dot += f' {c[0]} -> {c[1]} [label="{str(simplified_guard).lower()}"];\n'
dot += "}"
return dot
def compute_declare_assumption(s):
"""Compute declare assumptions."""
pairs = list(it.combinations(s, 2))
if pairs:
first_assumption = "~(ex1 y: 0<=y & y<=max($) & ~("
for symbol in s:
if symbol == s[-1]:
first_assumption += "y in " + symbol + "))"
else:
first_assumption += "y in " + symbol + " | "
second_assumption = "~(ex1 y: 0<=y & y<=max($) & ~("
for pair in pairs:
if pair == pairs[-1]:
second_assumption += f"(y notin {pair[0]} | y notin {pair[1]})));"
else:
second_assumption += f"(y notin {pair[0]} | y notin {pair[1]}) & "
return first_assumption + " & " + second_assumption
return None
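# Worked example (string assembled exactly as above, shown for two symbols; the result
# is a single string, split over two comment lines here for readability):
#   compute_declare_assumption(["A", "B"]) ->
#     "~(ex1 y: 0<=y & y<=max($) & ~(y in A | y in B))"
#     " & ~(ex1 y: 0<=y & y<=max($) & ~((y notin A | y notin B)));"
# i.e. every position carries at least one symbol and no position carries two, which
# is the "simple trace" (declare) assumption.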
def createMonafile(p: str):
"""Write the .mona file."""
try:
with open(f"{PACKAGE_DIR}/automa.mona", "w+", encoding="utf-8") as file:
file.write(p)
except IOError:
print("[ERROR]: Problem opening the automa.mona file!")
def invoke_mona():
"""Execute the MONA tool."""
command = f"mona -q -u -w {PACKAGE_DIR}/automa.mona"
process = Popen(
args=command,
stdout=PIPE,
stderr=PIPE,
preexec_fn=os.setsid,
shell=True,
encoding="utf-8",
)
try:
output, _ = process.communicate(timeout=30)
return str(output).strip()
except TimeoutExpired:
os.killpg(os.getpgid(process.pid), signal.SIGTERM)
return False
def output2dot(mona_output):
"""Parse the mona output or return the unsatisfiable dot."""
if "Formula is unsatisfiable" in mona_output:
return UNSAT_DOT
return parse_mona(mona_output)
def to_dfa(f, mona_dfa_out=False) -> str:
"""Translate to deterministic finite-state automaton."""
p = MonaProgram(f)
mona_p_string = p.mona_program()
createMonafile(mona_p_string)
mona_dfa = invoke_mona()
if mona_dfa_out:
return mona_dfa
check_(mona_dfa_out is False)
return output2dot(mona_dfa)
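# Typical end-to-end usage, as a sketch (it mirrors the flow in __main__.py and assumes
# the MONA binary is installed and on PATH, otherwise invoke_mona fails or times out):
#   from ltlf2dfa.parser.ltlf import LTLfParser
#   parser = LTLfParser()
#   formula = parser("G(a -> X b)")           # any LTLf formula accepted by the parser
#   print(formula.to_dfa())                   # DOT source of the corresponding DFA
#   print(formula.to_dfa(mona_dfa_out=True))  # raw MONA automaton output instead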
| 6,439 | 30.568627 | 91 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/__main__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This is the command line tool for the LTLf2DFA tool."""
import click # type: ignore
from ltlf2dfa.base import Logic
from ltlf2dfa.parser.ltlf import LTLfParser
from ltlf2dfa.parser.ppltl import PPLTLParser
def execute(logic, formula):
"""Transform the formula."""
try:
with open(formula, "r", encoding="utf-8") as f:
formula_str = f.read()
except Exception as exc:
raise IOError(
"[ERROR]: Something wrong occurred while parsing the domain and problem."
) from exc
if logic == Logic.LTLf:
f_parser = LTLfParser()
try:
parsed_formula = f_parser(formula_str)
except Exception as e:
raise ValueError(e) from e
elif logic == Logic.PPLTL:
p_parser = PPLTLParser()
try:
parsed_formula = p_parser(formula_str)
except Exception as e:
raise ValueError(e) from e
else:
raise ValueError("Formula has mixed future/past operators.")
dfa = parsed_formula.to_dfa(mona_dfa_out=True)
print(dfa)
@click.command()
@click.option(
"-l",
"--logic",
type=click.Choice(["ltlf", "ppltl"], case_sensitive=False),
required=True,
)
@click.option(
"-f",
"--formula",
required=True,
help="Path to the LTLf/PPLTL formula file.",
type=click.Path(exists=True, readable=True),
)
def cli(logic, formula):
"""From LTLf/PPLTL formulas to DFA."""
execute(Logic(logic.lower()), formula)
if __name__ == "__main__":
cli() # pragma: no cover
| 2,256 | 27.935897 | 85 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/ppltl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the implementation of Past Linear Temporal Logic on finite traces."""
import re
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from ltlf2dfa.base import (
AtomicFormula,
AtomSymbol,
BinaryOperator,
Formula,
UnaryOperator,
)
from ltlf2dfa.helpers import new_var
from ltlf2dfa.ltlf2dfa import to_dfa
from ltlf2dfa.pl import PLAtomic
from ltlf2dfa.symbols import OpSymbol, Symbols
class PPLTLFormula(Formula, ABC):
"""A class for the PPLTL formula."""
def to_nnf(self) -> "PPLTLFormula":
"""Convert an PPLTL formula in NNF."""
return self
@abstractmethod
def negate(self) -> "PPLTLFormula":
"""Negate the formula."""
def __repr__(self):
"""Get the representation."""
return self.__str__()
def to_mona(self, v: Optional[Any] = None) -> str:
"""
        Transform the PPLTL formula into its encoding in MONA.
:return: a string.
"""
raise NotImplementedError()
# def to_pldlf(self):
# """
    # Transform the formula into an equivalent LDLf formula.
#
# :return: an LDLf formula.
# """
def to_dfa(self, mona_dfa_out: bool = False) -> str:
"""
Translate into a DFA.
:param mona_dfa_out: flag for DFA output in MONA syntax.
"""
return to_dfa(self, mona_dfa_out)
class PPLTLUnaryOperator(UnaryOperator[PPLTLFormula], PPLTLFormula, ABC):
"""A unary operator for PPLTL."""
class PPLTLBinaryOperator(BinaryOperator[PPLTLFormula], PPLTLFormula, ABC):
"""A binary operator for PPLTL."""
class PPLTLAtomic(AtomicFormula, PPLTLFormula):
"""Class for PPLTL atomic formulas."""
name_regex = re.compile(r"[a-z][a-z0-9_]*")
def negate(self):
"""Negate the formula."""
return PPLTLNot(self)
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return PLAtomic(self.s).find_labels()
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL atomic formula."""
if v != "max($)":
return f"({v} in {self.s.upper()})"
return PLAtomic(self.s).to_mona(v="max($)")
# def to_pldlf(self):
# return LDLfPropositional(PLAtomic(self.s)).convert()
class PPLTLTrue(PPLTLAtomic):
"""Class for the PPLTL True formula."""
def __init__(self):
"""Initialize the formula."""
super().__init__(Symbols.TRUE.value)
def negate(self):
"""Negate the formula."""
return PPLTLFalse()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding for True."""
return Symbols.TRUE.value
class PPLTLFalse(PPLTLAtomic):
"""Class for the PPLTL False formula."""
def __init__(self):
"""Initialize the formula."""
super().__init__(Symbols.FALSE.value)
def negate(self):
"""Negate the formula."""
return PPLTLTrue()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding for False."""
return Symbols.FALSE.value
class PPLTLNot(PPLTLUnaryOperator):
"""Class for the PPLTL not formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.NOT.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
if not isinstance(self.f, AtomicFormula):
return self.f.negate().to_nnf()
return self
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.f
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Not formula."""
return f"~({self.f.to_mona(v)})"
# def to_pldlf(self):
# return LDLfNot(self.f.to_pldlf())
class PPLTLAnd(PPLTLBinaryOperator):
"""Class for the PPLTL And formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.AND.value
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return PPLTLOr([f.negate() for f in self.formulas])
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL And formula."""
return f"({' & '.join([f.to_mona(v) for f in self.formulas])})"
# def to_pldlf(self):
# return LDLfAnd([f.to_pldlf() for f in self.formulas])
class PPLTLOr(PPLTLBinaryOperator):
"""Class for the PPLTL Or formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.OR.value
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return PPLTLAnd([f.negate() for f in self.formulas])
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Or formula."""
return f"({' | '.join([f.to_mona(v) for f in self.formulas])})"
class PPLTLImplies(PPLTLBinaryOperator):
"""Class for the PPLTL Implication formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.IMPLIES.value
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
first, second = self.formulas[0:2]
final_formula = PPLTLOr([PPLTLNot(first).to_nnf(), second.to_nnf()])
for subformula in self.formulas[2:]:
final_formula = PPLTLOr(
[PPLTLNot(final_formula).to_nnf(), subformula.to_nnf()]
)
return final_formula
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Implication formula."""
return self.to_nnf().to_mona(v)
class PPLTLEquivalence(PPLTLBinaryOperator):
"""Class for the PPLTL Equivalente formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.EQUIVALENCE.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
fs = self.formulas
pos = PPLTLAnd(fs)
neg = PPLTLAnd([PPLTLNot(f) for f in fs])
res = PPLTLOr([pos, neg]).to_nnf()
return res
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Equivalence formula."""
return self.to_nnf().to_mona(v)
class PPLTLBefore(PPLTLUnaryOperator):
"""Class for the PPLTL Before formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.BEFORE.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
return PPLTLBefore(self.f.to_nnf())
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return PPLTLWeakBefore(self.f.negate())
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Before formula."""
ex_var = new_var(v)
if v != "max($)":
return f"(ex1 {ex_var}: {ex_var} in $ & {ex_var}={v}-1 & {v}>0 & {self.f.to_mona(ex_var)})"
return f"(ex1 {ex_var}: {ex_var} in $ & {ex_var}=max($)-1 & max($)>0 & {self.f.to_mona(ex_var)})"
# def to_pldlf(self):
# return LDLfDiamond(
# RegExpPropositional(PLTrue()),
# LDLfAnd([self.f.to_pldlf(), LDLfNot(LDLfEnd())]),
# )
class PPLTLWeakBefore(PPLTLUnaryOperator):
"""Class for the PPLTL Weak Before formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.WEAK_BEFORE.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
return PPLTLWeakBefore(self.f.to_nnf())
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return PPLTLBefore(self.f.negate())
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Weak Before formula."""
ex_var = new_var(v)
if v != "max($)":
return f"~(ex1 {ex_var}: {ex_var} in $ & {ex_var}={v}-1 & {v}>0 & ~({self.f.to_mona(ex_var)}))"
return f"~(ex1 {ex_var}: {ex_var} in $ & {ex_var}=max($)-1 & max($)>0 & ~({self.f.to_mona(ex_var)}))"
# def to_pldlf(self):
# return LDLfDiamond(
# RegExpPropositional(PLTrue()),
# LDLfAnd([self.f.to_pldlf(), LDLfNot(LDLfEnd())]),
# )
class PPLTLSince(PPLTLBinaryOperator):
"""Class for the PPLTL Since formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.SINCE.value
def to_nnf(self):
"""Transform to NNF."""
return PPLTLSince([f.to_nnf() for f in self.formulas])
def negate(self):
"""Negate the formula."""
return PPLTLPastRelease([f.negate() for f in self.formulas])
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Since formula."""
ex_var = new_var(v)
all_var = new_var(ex_var)
f1 = self.formulas[0].to_mona(v=all_var)
f2 = (
PPLTLSince(self.formulas[1:]).to_mona(v=ex_var)
if len(self.formulas) > 2
else self.formulas[1].to_mona(v=ex_var)
)
return (
f"(ex1 {ex_var}: {ex_var} in $ & 0<={ex_var}&{ex_var}<={v} & {f2} & "
f"(all1 {all_var}: {all_var} in $ & {ex_var}<{all_var}&{all_var}<={v} => {f1}))"
)
# def to_pldlf(self):
# f1 = self.formulas[0].to_pldlf()
# f2 = (
# PPLTLSince(self.formulas[1:]).to_pldlf()
# if len(self.formulas) > 2
# else self.formulas[1].to_pldlf()
# )
# return LDLfDiamond(
# RegExpStar(RegExpSequence([RegExpTest(f1), RegExpPropositional(PLTrue())])),
# LDLfAnd([f2, LDLfNot(LDLfEnd())]),
# )
class PPLTLPastRelease(PPLTLBinaryOperator):
"""Class for the PPLTL Past Release formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.PAST_RELEASE.value
def to_nnf(self):
"""Transform to NNF."""
return PPLTLPastRelease([f.to_nnf() for f in self.formulas])
def negate(self):
"""Negate the formula."""
return PPLTLSince([f.negate() for f in self.formulas])
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Past Release formula."""
ex_var = new_var(v)
all_var = new_var(ex_var)
f1 = self.formulas[0].to_mona(v=all_var)
f2 = (
PPLTLSince(self.formulas[1:]).to_mona(v=ex_var)
if len(self.formulas) > 2
else self.formulas[1].to_mona(v=ex_var)
)
return (
f"~(ex1 {ex_var}: {ex_var} in $ & 0<={ex_var}&{ex_var}<={v} & ~({f2}) & "
f"(all1 {all_var}: {all_var} in $ & {ex_var}<{all_var}&{all_var}<={v} => ~({f1})))"
)
# def to_pldlf(self):
# f1 = self.formulas[0].to_pldlf()
# f2 = (
# PPLTLSince(self.formulas[1:]).to_pldlf()
# if len(self.formulas) > 2
# else self.formulas[1].to_pldlf()
# )
# return LDLfDiamond(
# RegExpStar(RegExpSequence([RegExpTest(f1), RegExpPropositional(PLTrue())])),
# LDLfAnd([f2, LDLfNot(LDLfEnd())]),
# )
class PPLTLOnce(PPLTLUnaryOperator):
"""Class for the PPLTL Once formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.ONCE.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
return PPLTLSince([PPLTLTrue(), self.f])
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Once formula."""
return PPLTLSince([PPLTLTrue(), self.f]).to_mona(v)
# def to_pldlf(self):
# return LDLfDiamond(
# RegExpStar(RegExpPropositional(PLTrue())),
# LDLfAnd([self.f.to_pldlf(), LDLfNot(LDLfEnd())]),
# )
class PPLTLHistorically(PPLTLUnaryOperator):
"""Class for the PPLTL Historically formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.HISTORICALLY.value
def to_nnf(self) -> PPLTLFormula:
"""Transform to NNF."""
return PPLTLPastRelease([PPLTLFalse(), self.f.to_nnf()])
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Historically formula."""
return PPLTLNot(PPLTLOnce(PPLTLNot(self.f))).to_mona(v)
class PPLTLStart(PPLTLFormula):
"""Class for the PPLTL Start formula."""
# def to_nnf(self) -> PPLTLFormula:
# """Transform to NNF."""
# return PPLTLAnd([PPLTLWeakBefore(PPLTLFalse()), PPLTLNot(PPLTLEnd())]).to_nnf()
def negate(self) -> PPLTLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def _members(self):
return (Symbols.START.value,)
def __str__(self):
"""Get the string representation."""
return Symbols.START.value
def to_mona(self, v="max($)") -> str:
"""Return the MONA encoding of a PPLTL Start formula."""
return PPLTLWeakBefore(PPLTLFalse()).to_mona(v)
# class PPLTLEnd(PPLTLFormula):
# """Class for the PPLTL End formula."""
#
# def find_labels(self) -> List[AtomSymbol]:
# """Find the labels."""
# return []
#
# def _members(self):
# return (Symbols.END.value,)
#
# def to_nnf(self) -> PPLTLFormula:
# """Transform to NNF."""
# return PPLTLHistorically(PPLTLFalse()).to_nnf()
#
# def negate(self) -> PPLTLFormula:
# """Negate the formula."""
# return self.to_nnf().negate()
#
# def __str__(self):
# """Get the string representation."""
# return "_".join(map(str, self._members()))
| 15,396 | 29.309055 | 109 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/ltlf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the implementation of Linear Temporal Logic on finite traces."""
import re
from abc import ABC, abstractmethod
from typing import Any, List, Optional
from ltlf2dfa.base import (
AtomicFormula,
AtomSymbol,
BinaryOperator,
Formula,
UnaryOperator,
)
from ltlf2dfa.helpers import new_var
from ltlf2dfa.ltlf2dfa import to_dfa
from ltlf2dfa.pl import PLAtomic
from ltlf2dfa.symbols import OpSymbol, Symbols
class LTLfFormula(Formula, ABC):
"""A class for the LTLf formula."""
def to_nnf(self) -> "LTLfFormula":
"""Convert an LTLf formula in NNF."""
return self
@abstractmethod
def negate(self) -> "LTLfFormula":
"""Negate the formula."""
def __repr__(self):
"""Get the representation."""
return self.__str__()
def to_mona(self, v: Optional[Any] = None) -> str:
"""
        Transform the LTLf formula into its encoding in MONA.
:return: a string.
"""
raise NotImplementedError()
def to_ldlf(self):
"""
        Transform the formula into an equivalent LDLf formula.
:return: an LDLf formula.
"""
def to_dfa(self, mona_dfa_out: bool = False) -> str:
"""
Translate into a DFA.
:param mona_dfa_out: flag for DFA output in MONA syntax.
"""
return to_dfa(self, mona_dfa_out)
class LTLfUnaryOperator(UnaryOperator[LTLfFormula], LTLfFormula, ABC):
"""A unary operator for LTLf."""
class LTLfBinaryOperator(BinaryOperator[LTLfFormula], LTLfFormula, ABC):
"""A binary operator for LTLf."""
class LTLfAtomic(AtomicFormula, LTLfFormula):
"""Class for LTLf atomic formulas."""
name_regex = re.compile(r"[a-z][a-z0-9_]*")
def negate(self):
"""Negate the formula."""
return LTLfNot(self)
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return PLAtomic(self.s).find_labels()
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf atomic formula."""
if v != "0":
return f"({v} in {self.s.upper()})"
return PLAtomic(self.s).to_mona()
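    # Example (illustrative, mirroring the package's test suite):
    #   LTLfAtomic("a").to_mona(v="0")   -> "(0 in A)"
    #   LTLfAtomic("a").to_mona(v="v_1") -> "(v_1 in A)"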
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfDiamond(RegExpPropositional(PLAtomic(self.s)), LDLfLogicalTrue())
class LTLfTrue(LTLfAtomic):
"""Class for the LTLf True formula."""
def __init__(self):
"""Initialize the formula."""
super().__init__(Symbols.TRUE.value)
def negate(self):
"""Negate the formula."""
return LTLfFalse()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def to_mona(self, v="0") -> str:
"""Return the MONA encoding for False."""
return Symbols.TRUE.value
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfDiamond(RegExpPropositional(PLTrue()), LDLfLogicalTrue())
class LTLfFalse(LTLfAtomic):
"""Class for the LTLf False formula."""
def __init__(self):
"""Initialize the formula."""
super().__init__(Symbols.FALSE.value)
def negate(self):
"""Negate the formula."""
return LTLfTrue()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def to_mona(self, v="0") -> str:
"""Return the MONA encoding for False."""
return Symbols.FALSE.value
class LTLfNot(LTLfUnaryOperator):
"""Class for the LTLf not formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.NOT.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
if not isinstance(self.f, AtomicFormula):
return self.f.negate().to_nnf()
return self
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.f
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Not formula."""
return f"~({self.f.to_mona(v)})"
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfNot(self.f.to_ldlf())
class LTLfAnd(LTLfBinaryOperator):
"""Class for the LTLf And formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.AND.value
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return LTLfOr([f.negate() for f in self.formulas])
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf And formula."""
return f"({' & '.join([f.to_mona(v) for f in self.formulas])})"
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfAnd([f.to_ldlf() for f in self.formulas])
class LTLfOr(LTLfBinaryOperator):
"""Class for the LTLf Or formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.OR.value
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return LTLfAnd([f.negate() for f in self.formulas])
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Or formula."""
return f"({' | '.join([f.to_mona(v) for f in self.formulas])})"
# def to_ldlf(self):
# """Convert LTLf formula to LDLf."""
# return LDLfOr([f.to_ldlf() for f in self.formulas])
class LTLfImplies(LTLfBinaryOperator):
"""Class for the LTLf Implication formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.IMPLIES.value
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
first, second = self.formulas[0:2]
final_formula = LTLfOr([LTLfNot(first).to_nnf(), second.to_nnf()])
for subformula in self.formulas[2:]:
final_formula = LTLfOr(
[LTLfNot(final_formula).to_nnf(), subformula.to_nnf()]
)
return final_formula
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Implication formula."""
return self.to_nnf().to_mona(v)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# f1 = (
# LTLfImplies(self.formulas[:-1]).to_ldlf()
# if len(self.formulas) > 2
# else self.formulas[0].to_ldlf()
# )
# f2 = self.formulas[-1].to_ldlf()
# return LDLfOr([LDLfNot(f1), f2])
class LTLfEquivalence(LTLfBinaryOperator):
"""Class for the LTLf Equivalente formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.EQUIVALENCE.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
fs = self.formulas
pos = LTLfAnd(fs)
neg = LTLfAnd([LTLfNot(f) for f in fs])
res = LTLfOr([pos, neg]).to_nnf()
return res
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Equivalence formula."""
return self.to_nnf().to_mona(v)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# f1 = (
# LTLfImplies(self.formulas[:-1]).to_ldlf()
# if len(self.formulas) > 2
# else self.formulas[0].to_ldlf()
# )
# f2 = self.formulas[-1].to_ldlf()
# return LDLfAnd([LDLfOr([LDLfNot(f1), f2]), LDLfOr([f1, LDLfNot(f2)])])
class LTLfNext(LTLfUnaryOperator):
"""Class for the LTLf Next formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.NEXT.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfNext(self.f.to_nnf())
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return LTLfWeakNext(self.f.negate())
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Next formula."""
ex_var = new_var(v)
if v != "0":
return f"(ex1 {ex_var}: {ex_var} in $ & {ex_var}={v}+1 & {self.f.to_mona(ex_var)})"
return f"(ex1 {ex_var}: {ex_var} in $ & {ex_var}=1 & {self.f.to_mona(ex_var)})"
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfDiamond(
# RegExpPropositional(PLTrue()),
# LDLfAnd([self.f.to_ldlf(), LDLfNot(LDLfEnd())]),
# )
class LTLfWeakNext(LTLfUnaryOperator):
"""Class for the LTLf Weak Next formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.WEAK_NEXT.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfWeakNext(self.f.to_nnf())
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return LTLfNext(self.f.negate())
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf WeakNext formula."""
ex_var = new_var(v)
if v != "0":
return f"(({v} = max($)) | (ex1 {ex_var}: {ex_var} in $ & {ex_var}={v}+1 & {self.f.to_mona(ex_var)}))"
return f"((0 = max($)) | (ex1 {ex_var}: {ex_var} in $ & {ex_var}=1 & {self.f.to_mona(ex_var)}))"
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfBox(
# RegExpPropositional(PLTrue()), LDLfOr([self.f.to_ldlf(), LDLfEnd()])
# )
class LTLfUntil(LTLfBinaryOperator):
"""Class for the LTLf Until formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.UNTIL.value
def to_nnf(self):
"""Transform to NNF."""
return LTLfUntil([f.to_nnf() for f in self.formulas])
def negate(self):
"""Negate the formula."""
return LTLfRelease([f.negate() for f in self.formulas])
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Until formula."""
ex_var = new_var(v)
all_var = new_var(ex_var)
f1 = self.formulas[0].to_mona(v=all_var)
f2 = (
LTLfUntil(self.formulas[1:]).to_mona(v=ex_var)
if len(self.formulas) > 2
else self.formulas[1].to_mona(v=ex_var)
)
return (
f"(ex1 {ex_var}: {ex_var} in $ & {v}<={ex_var}&{ex_var}<=max($) & {f2} & "
f"(all1 {all_var}: {all_var} in $ & {v}<={all_var}&{all_var}<{ex_var} => {f1}))"
)
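    # Worked example (taken from the test suite): "a U b" at v="0" encodes to
    #   (ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & (v_1 in B) &
    #    (all1 v_2: v_2 in $ & 0<=v_2&v_2<v_1 => (v_2 in A)))
    # i.e. some position >= v satisfies b and every earlier position from v on satisfies a.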
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# f1 = self.formulas[0].to_ldlf()
# f2 = (
# LTLfUntil(self.formulas[1:]).to_ldlf()
# if len(self.formulas) > 2
# else self.formulas[1].to_ldlf()
# )
# return LDLfDiamond(
# RegExpStar(RegExpSequence([RegExpTest(f1), RegExpPropositional(PLTrue())])),
# LDLfAnd([f2, LDLfNot(LDLfEnd())]),
# )
class LTLfRelease(LTLfBinaryOperator):
"""Class for the LTLf Release formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.RELEASE.value
def to_nnf(self):
"""Transform to NNF."""
return LTLfRelease([f.to_nnf() for f in self.formulas])
def negate(self):
"""Negate the formula."""
return LTLfUntil([f.negate() for f in self.formulas])
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Release formula."""
ex_var = new_var(v)
all_var = new_var(ex_var)
f1 = self.formulas[0].to_mona(v=ex_var)
f2 = (
LTLfRelease(self.formulas[1:]).to_mona(v=all_var)
if len(self.formulas) > 2
else self.formulas[1].to_mona(v=all_var)
)
return (
f"((ex1 {ex_var}: {ex_var} in $ & {v}<={ex_var}&{ex_var}<=max($) & {f1} & "
f"(all1 {all_var}: {all_var} in $ & {v}<={all_var}&{all_var}<={ex_var} => {f2})) | (all1 {all_var}: "
f"{all_var} in $ & {v}<={all_var}&{all_var}<=max($) => {f2}))"
)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# f1 = self.formulas[0].to_ldlf()
# f2 = (
# LTLfRelease(self.formulas[1:]).to_ldlf()
# if len(self.formulas) > 2
# else self.formulas[1].to_ldlf()
# )
# return LDLfBox(
# RegExpStar(
# RegExpSequence([RegExpTest(LDLfNot(f1)), RegExpPropositional(PLTrue())])
# ),
# LDLfOr([f2, LDLfEnd()]),
# )
class LTLfEventually(LTLfUnaryOperator):
"""Class for the LTLf Eventually formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.EVENTUALLY.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfUntil([LTLfTrue(), self.f])
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Eventually formula."""
return LTLfUntil([LTLfTrue(), self.f]).to_mona(v)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfDiamond(
# RegExpStar(RegExpPropositional(PLTrue())),
# LDLfAnd([self.f.to_ldlf(), LDLfNot(LDLfEnd())]),
# )
class LTLfAlways(LTLfUnaryOperator):
"""Class for the LTLf Always formula."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.ALWAYS.value
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfRelease([LTLfFalse(), self.f.to_nnf()])
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Always formula."""
return LTLfRelease([LTLfFalse(), self.f]).to_mona(v)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfBox(
# RegExpStar(RegExpPropositional(PLTrue())),
# LDLfOr([self.f.to_ldlf(), LDLfEnd()]),
# )
class LTLfLast(LTLfFormula):
"""Class for the LTLf Last formula."""
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfAnd([LTLfWeakNext(LTLfFalse()), LTLfNot(LTLfEnd())]).to_nnf()
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def _members(self):
return (Symbols.LAST.value,)
def __str__(self):
"""Get the string representation."""
return Symbols.LAST.value
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of an LTLf Last formula."""
return LTLfWeakNext(LTLfFalse()).to_mona(v)
# def to_ldlf(self):
# """Convert the formula to LDLf."""
# return LDLfDiamond(RegExpPropositional(PLTrue()), LDLfEnd())
class LTLfEnd(LTLfFormula):
"""Class for the LTLf End formula."""
def find_labels(self) -> List[AtomSymbol]:
"""Find the labels."""
return []
def _members(self):
return (Symbols.END.value,)
def to_nnf(self) -> LTLfFormula:
"""Transform to NNF."""
return LTLfAlways(LTLfFalse()).to_nnf()
def negate(self) -> LTLfFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def __str__(self):
"""Get the string representation."""
return "_".join(map(str, self._members()))
| 16,822 | 29.202873 | 114 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/pl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module provides support for Propositional Logic."""
import functools
from abc import ABC, abstractmethod
from typing import Any, List, Optional, Set
from ltlf2dfa.base import AtomicFormula, BinaryOperator, Formula, UnaryOperator
from ltlf2dfa.symbols import OpSymbol, Symbols
class PLFormula(Formula):
"""A class to represent propositional formulas."""
def __init__(self):
"""Initialize a PL formula."""
Formula.__init__(self)
self._atoms = None # type: Optional[Set[PLAtomic]]
def __repr__(self):
"""Return a representation of the formula."""
return str(self)
def find_atomics(self) -> Set["PLAtomic"]:
"""
Find all the atomic formulas in the propositional formulas.
That is, find the leaves in the syntax tree.
:return: the set of atomic formulas.
"""
if self._atoms is None:
self._atoms = self._find_atomics()
return self._atoms
@abstractmethod
def _find_atomics(self) -> Set["PLAtomic"]:
"""Find all the atomic formulas in the propositional formulas."""
@abstractmethod
def negate(self) -> "PLFormula":
"""Negate the formula. Used by 'to_nnf'."""
def to_mona(self, v: Optional[Any] = None) -> str:
"""
        Transform the PL formula into its encoding in MONA.
:return: a string.
"""
raise NotImplementedError()
class PLAtomic(AtomicFormula, PLFormula):
"""A class to represent propositional atomic formulas."""
def find_labels(self) -> List[Any]:
"""Return the list of symbols."""
return [self.s]
def _find_atomics(self):
return {self}
def negate(self) -> PLFormula:
"""Negate the formula."""
return PLNot(self)
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of a PL atomic formula."""
return f"({v} in {self.s.upper()})"
class PLBinaryOperator(BinaryOperator[PLFormula], PLFormula, ABC):
"""An operator for Propositional Logic."""
def _find_atomics(self) -> Set[PLAtomic]:
return functools.reduce(
set.union, [f.find_atomics() for f in self.formulas] # type: ignore
)
class PLTrue(PLAtomic):
"""Propositional true."""
def __init__(self):
"""Initialize the PL true formula."""
PLAtomic.__init__(self, Symbols.TRUE.value)
def negate(self) -> "PLFalse":
"""Negate the formula."""
return PLFalse()
def find_labels(self) -> List[Any]:
"""Return the list of symbols."""
return []
def to_nnf(self):
"""Transform in NNF."""
return self
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of a PL atomic formula."""
return Symbols.TRUE.value
class PLFalse(PLAtomic):
"""Propositional false."""
def __init__(self):
"""Initialize the formula."""
PLAtomic.__init__(self, Symbols.FALSE.value)
def negate(self) -> "PLTrue":
"""Negate the formula."""
return PLTrue()
def find_labels(self) -> List[Any]:
"""Return the list of symbols."""
return []
def to_nnf(self):
"""Transform in NNF."""
return self
def to_mona(self, v="0") -> str:
"""Return the MONA encoding of a PL atomic formula."""
return Symbols.FALSE.value
class PLNot(UnaryOperator[PLFormula], PLFormula):
"""Propositional Not."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.NOT.value
def to_nnf(self):
"""Transform in NNF."""
if not isinstance(self.f, AtomicFormula):
return self.f.negate().to_nnf()
return self
def negate(self) -> PLFormula:
"""Negate the formula."""
return self.f
def _find_atomics(self) -> Set["PLAtomic"]:
return self.f.find_atomics()
class PLOr(PLBinaryOperator):
"""Propositional Or."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.OR.value
def to_nnf(self):
"""Transform in NNF."""
return PLOr([f.to_nnf() for f in self.formulas])
def negate(self) -> PLFormula:
"""Negate the formula."""
return PLAnd([f.negate() for f in self.formulas])
class PLAnd(PLBinaryOperator):
"""Propositional And."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.AND.value
def to_nnf(self):
"""Transform in NNF."""
return PLAnd([f.to_nnf() for f in self.formulas])
def negate(self) -> PLFormula:
"""Negate the formula."""
return PLOr([f.negate() for f in self.formulas])
class PLImplies(PLBinaryOperator):
"""Propositional Implication."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.IMPLIES.value
def negate(self) -> PLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
def to_nnf(self):
"""Transform in NNF."""
first, second = self.formulas[0:2]
final_formula = PLOr([PLNot(first).to_nnf(), second.to_nnf()])
for subformula in self.formulas[2:]:
final_formula = PLOr([PLNot(final_formula).to_nnf(), subformula.to_nnf()])
return final_formula
class PLEquivalence(PLBinaryOperator):
"""Propositional Equivalence."""
@property
def operator_symbol(self) -> OpSymbol:
"""Get the operator symbol."""
return Symbols.EQUIVALENCE.value
def to_nnf(self):
"""Transform in NNF."""
fs = self.formulas
pos = PLAnd(fs)
neg = PLAnd([PLNot(f) for f in fs])
res = PLOr([pos, neg]).to_nnf()
return res
def negate(self) -> PLFormula:
"""Negate the formula."""
return self.to_nnf().negate()
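# Minimal usage sketch (illustrative): negation is pushed inwards, mirroring the
# behaviour exercised by the test suite.
if __name__ == "__main__":
    _a, _b = PLAtomic("A"), PLAtomic("B")
    print(PLAnd([_a, _b]).negate())         # equivalent to PLOr([PLNot(A), PLNot(B)])
    print(PLNot(PLAnd([_a, _b])).to_nnf())  # the same formula, reached via NNF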
| 6,697 | 26.563786 | 86 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/symbols.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the definition to deal with symbols."""
from enum import Enum
from typing import Set
OpSymbol = str
class Symbols(Enum):
"""A set of symbols that can be used in a logical formula."""
NOT = "!"
AND = "&"
OR = "|"
EQUAL = "="
IMPLIES = "->"
EQUIVALENCE = "<->"
NEXT = "X"
WEAK_NEXT = "WX"
UNTIL = "U"
RELEASE = "R"
EVENTUALLY = "F"
ALWAYS = "G"
BEFORE = "Y"
WEAK_BEFORE = "WY"
ONCE = "O"
SINCE = "S"
PAST_RELEASE = "P"
HISTORICALLY = "H"
ROUND_BRACKET_LEFT = "("
ROUND_BRACKET_RIGHT = ")"
LAST = "last"
START = "start"
END = "end"
TRUE = "true"
FALSE = "false"
ALL_SYMBOLS: Set[str] = {v.value for v in Symbols}
| 1,454 | 24.086207 | 70 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Top-level package for ltlf2dfa."""
from .helpers import _get_current_path
_ROOT_PATH = _get_current_path()
| 822 | 31.92 | 70 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/helpers.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Helpers module."""
from abc import ABC, abstractmethod
from copy import copy
from pathlib import Path
from ltlf2dfa.symbols import Symbols
ParsingError = ValueError("Parsing error.")
class Hashable(ABC):
"""A base class to represent hashable objects."""
def __init__(self):
"""Initialize."""
self._hash = None
@abstractmethod
def _members(self):
raise NotImplementedError
def __eq__(self, other):
"""Compare."""
if type(other) is type(self):
return self._members() == other._members()
return False
def __hash__(self):
"""Compute the hash."""
if self._hash is None:
members = self._members()
self._hash = hash(members)
return self._hash
def __getstate__(self):
"""Get the state."""
d = copy(self.__dict__)
d.pop("_hash")
return d
def __setstate__(self, state):
"""Set the state."""
self.__dict__ = state
self._hash = None
class Wrapper(Hashable):
"""Wrap other objects and expose the same interface.
This helper class can be subclassed to create a constant view on wrapped
objects, exposing the same interface.
This is an immutable object: either add members to _mutable list, or
modify them through __dict__.
"""
_mutable = ["_hash"]
def __init__(self, obj):
"""Initialize: save the wrapped object."""
super().__init__()
self.__dict__["_Wrapper__obj"] = obj
def __str__(self):
"""Just forward to obj."""
return str(self.__obj)
def __repr__(self):
"""Just forward to obj."""
return repr(self.__obj)
def __getattr__(self, attr):
"""Redirect to obj."""
return getattr(self.__obj, attr)
def __setattr__(self, attr, value):
"""If immutable, raises an error."""
if attr in self._mutable:
self.__dict__[attr] = value
else:
raise AttributeError("Can't modify: immutable object.")
def __delattr__(self, attr):
"""Raise an error, because del is not supported."""
raise AttributeError("Can't modify: immutable object.")
def __dir__(self):
"""Expose the same interface as wrapped."""
members = set(dir(self.__obj)).union(object.__dir__(self))
return sorted(members)
def _members(self):
return self.__obj
@property
def wrapped(self):
"""Return the wrapped object."""
return self.__obj
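# Illustrative usage sketch (hypothetical subclass, not used elsewhere in this
# package): a read-only view that forwards attribute lookups to the wrapped
# object while rejecting any mutation of the view itself.
#
#     class FrozenView(Wrapper):
#         """Constant view over an arbitrary object."""
#
#     view = FrozenView([1, 2, 3])
#     view.count(1)      # forwarded to list.count -> 1
#     view.extra = 42    # raises AttributeError: immutable object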
def new_var(prev_var: str) -> str:
"""Compute next variable."""
if prev_var in ("0", "max($)"):
return "v_1"
s = prev_var.split("_")
s[1] = str(int(s[1]) + 1)
return "_".join(s)
def sym2regexp(sym: Symbols):
"""Transform a symbol to regex."""
s = sym.value
if s in r"|()+?*.[]":
return r"\%s" % s
return s
def check_(condition: bool, message: str = "") -> None:
"""
User-defined assert.
This function is useful to avoid the use of the built-in assert statement, which is removed
when the code is compiled in optimized mode. For more information, see
https://bandit.readthedocs.io/en/1.7.5/plugins/b101_assert_used.html
"""
if not condition:
raise AssertionError(message)
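# Example (illustrative): the parser transformers in this package call it as
#   check_(len(args) == 1)              # passes silently when the guard holds
#   check_(False, "unexpected arity")   # raises AssertionError("unexpected arity")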
def _get_current_path() -> Path:
"""Get the path to the file where the function is called."""
import inspect
import os
return Path(os.path.dirname(inspect.getfile(inspect.currentframe()))).parent # type: ignore
MAX_CACHE_SIZE = 1024
| 4,286 | 26.132911 | 96 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/parser/ppltl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Implementation of the PPLTL parser."""
from lark import Lark, Transformer
from ltlf2dfa.helpers import ParsingError, check_
from ltlf2dfa.parser import PARSERS_DIRECTORY, PPLTL_GRAMMAR_FILE
from ltlf2dfa.parser.pl import PLTransformer
from ltlf2dfa.ppltl import (
PPLTLAnd,
PPLTLAtomic,
PPLTLBefore,
PPLTLEquivalence,
PPLTLFalse,
PPLTLHistorically,
PPLTLImplies,
PPLTLNot,
PPLTLOnce,
PPLTLOr,
PPLTLPastRelease,
PPLTLSince,
PPLTLStart,
PPLTLTrue,
PPLTLWeakBefore,
)
class PPLTLTransformer(Transformer):
"""PPLTL Transformer."""
def __init__(self):
"""Initialize."""
super().__init__()
self._pl_transformer = PLTransformer()
def start(self, args):
"""Entry point."""
check_(len(args) == 1)
return args[0]
def ppltl_formula(self, args):
"""Parse PPLTL formula."""
check_(len(args) == 1)
return args[0]
def ppltl_equivalence(self, args):
"""Parse PPLTL Equivalence."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLEquivalence(subformulas)
raise ParsingError
def ppltl_implication(self, args):
"""Parse PPLTL Implication."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLImplies(subformulas)
raise ParsingError
def ppltl_or(self, args):
"""Parse PPLTL Or."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLOr(subformulas)
raise ParsingError
def ppltl_and(self, args):
"""Parse PPLTL And."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLAnd(subformulas)
raise ParsingError
def ppltl_since(self, args):
"""Parse PPLTL Since."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLSince(subformulas)
raise ParsingError
def ppltl_pastrelease(self, args):
"""Parse PPLTL Past Release."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PPLTLPastRelease(subformulas)
raise ParsingError
def ppltl_historically(self, args):
"""Parse PPLTL Historically."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PPLTLHistorically(f)
return f
def ppltl_once(self, args):
"""Parse PPLTL Once."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PPLTLOnce(f)
return f
def ppltl_before(self, args):
"""Parse PPLTL Before."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PPLTLBefore(f)
return f
def ppltl_weak_before(self, args):
"""Parse PPLTL Weak Before."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PPLTLWeakBefore(f)
return f
def ppltl_not(self, args):
"""Parse PPLTL Not."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PPLTLNot(f)
return f
def ppltl_wrapped(self, args):
"""Parse PPLTL wrapped formula."""
if len(args) == 1:
return args[0]
if len(args) == 3:
_, formula, _ = args
return formula
raise ParsingError
def ppltl_atom(self, args):
"""Parse PPLTL Atom."""
check_(len(args) == 1)
return args[0]
def ppltl_true(self, _args):
"""Parse PPLTL True."""
return PPLTLTrue()
def ppltl_false(self, _args):
"""Parse PPLTL False."""
return PPLTLFalse()
def ppltl_start(self, _args):
"""Parse PPLTL Last."""
return PPLTLStart()
# def ppltl_end(self, args):
# raise NotImplementedError("PPLTL end not supported, yet")
def ppltl_symbol(self, args):
"""Parse PPLTL Symbol."""
check_(len(args) == 1)
token = args[0]
symbol = str(token)
return PPLTLAtomic(symbol)
_ppltl_parser_lark = PPLTL_GRAMMAR_FILE.read_text()
class PPLTLParser:
"""PPLTL Parser class."""
def __init__(self):
"""Initialize."""
self._transformer = PPLTLTransformer()
self._parser = Lark(
_ppltl_parser_lark, parser="lalr", import_paths=[PARSERS_DIRECTORY]
)
def __call__(self, text):
"""Call."""
tree = self._parser.parse(text)
formula = self._transformer.transform(tree)
return formula
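if __name__ == "__main__":
    # Minimal usage sketch (illustrative). The formula below is an arbitrary
    # example; it assumes the grammar accepts the same function-style syntax as
    # the LTLf grammar, with the past operators H/O/Y/S from ltlf2dfa.symbols.
    _parser = PPLTLParser()
    print(_parser("H(a -> O(b))"))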
| 5,830 | 25.870968 | 79 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/parser/ltlf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Implementation of the LTLf parser."""
from lark import Lark, Transformer
from ltlf2dfa.helpers import ParsingError, check_
from ltlf2dfa.ltlf import (
LTLfAlways,
LTLfAnd,
LTLfAtomic,
LTLfEquivalence,
LTLfEventually,
LTLfFalse,
LTLfImplies,
LTLfLast,
LTLfNext,
LTLfNot,
LTLfOr,
LTLfRelease,
LTLfTrue,
LTLfUntil,
LTLfWeakNext,
)
from ltlf2dfa.parser import LTLF_GRAMMAR_FILE, PARSERS_DIRECTORY
from ltlf2dfa.parser.pl import PLTransformer
class LTLfTransformer(Transformer):
"""LTLf Transformer."""
def __init__(self):
"""Initialize."""
super().__init__()
self._pl_transformer = PLTransformer()
def start(self, args):
"""Entry point."""
check_(len(args) == 1)
return args[0]
def ltlf_formula(self, args):
"""Parse LTLf formula."""
check_(len(args) == 1)
return args[0]
def ltlf_equivalence(self, args):
"""Parse LTLf Equivalence."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfEquivalence(subformulas)
raise ParsingError
def ltlf_implication(self, args):
"""Parse LTLf Implication."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfImplies(subformulas)
raise ParsingError
def ltlf_or(self, args):
"""Parse LTLf Or."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfOr(subformulas)
raise ParsingError
def ltlf_and(self, args):
"""Parse LTLf And."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfAnd(subformulas)
raise ParsingError
def ltlf_until(self, args):
"""Parse LTLf Until."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfUntil(subformulas)
raise ParsingError
def ltlf_release(self, args):
"""Parse LTLf Release."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return LTLfRelease(subformulas)
raise ParsingError
def ltlf_always(self, args):
"""Parse LTLf Always."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = LTLfAlways(f)
return f
def ltlf_eventually(self, args):
"""Parse LTLf Eventually."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = LTLfEventually(f)
return f
def ltlf_next(self, args):
"""Parse LTLf Next."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = LTLfNext(f)
return f
def ltlf_weak_next(self, args):
"""Parse LTLf Weak Next."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = LTLfWeakNext(f)
return f
def ltlf_not(self, args):
"""Parse LTLf Not."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = LTLfNot(f)
return f
def ltlf_wrapped(self, args):
"""Parse LTLf wrapped formula."""
if len(args) == 1:
return args[0]
if len(args) == 3:
_, formula, _ = args
return formula
raise ParsingError
def ltlf_atom(self, args):
"""Parse LTLf Atom."""
check_(len(args) == 1)
return args[0]
def ltlf_true(self, _args):
"""Parse LTLf True."""
return LTLfTrue()
def ltlf_false(self, _args):
"""Parse LTLf False."""
return LTLfFalse()
def ltlf_last(self, _args):
"""Parse LTLf Last."""
return LTLfLast()
# def ltlf_end(self, _args):
# raise NotImplementedError("LTLf end not supported, yet")
def ltlf_symbol(self, args):
"""Parse LTLf Symbol."""
check_(len(args) == 1)
token = args[0]
symbol = str(token)
return LTLfAtomic(symbol)
_ltlf_parser_lark = LTLF_GRAMMAR_FILE.read_text()
class LTLfParser:
"""LTLf Parser class."""
def __init__(self):
"""Initialize."""
self._transformer = LTLfTransformer()
self._parser = Lark(
_ltlf_parser_lark, parser="lalr", import_paths=[PARSERS_DIRECTORY]
)
def __call__(self, text):
"""Call."""
tree = self._parser.parse(text)
formula = self._transformer.transform(tree)
return formula
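if __name__ == "__main__":
    # Minimal usage sketch (illustrative): parse a formula string into the LTLf
    # AST defined in ltlf2dfa.ltlf. Translating the result with .to_dfa()
    # additionally requires the external MONA tool to be installed.
    _parser = LTLfParser()
    _formula = _parser("F(a & b)")
    print(_formula)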
| 5,716 | 25.345622 | 78 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/parser/pl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Implementation of the PL parser."""
from lark import Lark, Transformer
from ltlf2dfa.helpers import ParsingError, check_
from ltlf2dfa.parser import PARSERS_DIRECTORY, PL_GRAMMAR_FILE
from ltlf2dfa.pl import (
PLAnd,
PLAtomic,
PLEquivalence,
PLFalse,
PLImplies,
PLNot,
PLOr,
PLTrue,
)
class PLTransformer(Transformer):
"""PL Transformer."""
def start(self, args):
"""Entry point."""
return args[0]
def propositional_formula(self, args):
"""Parse Propositional formula."""
check_(len(args) == 1)
return args[0]
def prop_equivalence(self, args):
"""Parse Propositional Equivalence."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PLEquivalence(subformulas)
raise ParsingError
def prop_implication(self, args):
"""Parse Propositional Implication."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PLImplies(subformulas)
raise ParsingError
def prop_or(self, args):
"""Parse Propositional Or."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PLOr(subformulas)
raise ParsingError
def prop_and(self, args):
"""Parse Propositional And."""
if len(args) == 1:
return args[0]
if (len(args) - 1) % 2 == 0:
subformulas = args[::2]
return PLAnd(subformulas)
raise ParsingError
def prop_not(self, args):
"""Parse Propositional Not."""
if len(args) == 1:
return args[0]
f = args[-1]
for _ in args[:-1]:
f = PLNot(f)
return f
def prop_wrapped(self, args):
"""Parse Propositional wrapped formula."""
if len(args) == 1:
return args[0]
if len(args) == 3:
_, f, _ = args
return f
raise ParsingError
def prop_atom(self, args):
"""Parse Propositional Atom."""
check_(len(args) == 1)
return args[0]
def prop_true(self, args):
"""Parse Propositional True."""
check_(len(args) == 1)
return PLTrue()
def prop_false(self, args):
"""Parse Propositional False."""
check_(len(args) == 1)
return PLFalse()
def atom(self, args):
"""Parse Atom."""
check_(len(args) == 1)
return PLAtomic(str(args[0]))
_pl_parser_lark = PL_GRAMMAR_FILE.read_text()
class PLParser:
"""PL Parser class."""
def __init__(self):
"""Initialize."""
self._transformer = PLTransformer()
self._parser = Lark(
_pl_parser_lark, parser="lalr", import_paths=[PARSERS_DIRECTORY]
)
def __call__(self, text):
"""Call."""
tree = self._parser.parse(text)
formula = self._transformer.transform(tree)
return formula
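if __name__ == "__main__":
    # Minimal usage sketch (illustrative): chained implications are flattened
    # into a single PLImplies node, as exercised by the test suite.
    _parser = PLParser()
    print(_parser("A -> B -> C"))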
| 3,833 | 26 | 76 | py |
LTLf2DFA | LTLf2DFA-main/ltlf2dfa/parser/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the implementation of the parsers for the supported logic formalisms."""
from ltlf2dfa import _ROOT_PATH
PARSERS_DIRECTORY = _ROOT_PATH / "ltlf2dfa" / "parser"
PL_GRAMMAR_FILE = PARSERS_DIRECTORY / "pl.lark"
LTLF_GRAMMAR_FILE = PARSERS_DIRECTORY / "ltlf.lark"
PPLTL_GRAMMAR_FILE = PARSERS_DIRECTORY / "ppltl.lark"
| 1,049 | 37.888889 | 96 | py |
LTLf2DFA | LTLf2DFA-main/scripts/whitelist.py | # flake8: noqa
# type: ignore
# pylint: skip-file
base_expression # unused variable (ltlf2dfa/base.py:167)
_ # unused function (ltlf2dfa/base.py:274)
_.wrapped # unused property (ltlf2dfa/helpers.py:114)
sym2regexp # unused function (ltlf2dfa/helpers.py:130)
MAX_CACHE_SIZE # unused variable (ltlf2dfa/helpers.py:139)
_.to_ldlf # unused method (ltlf2dfa/ltlf.py:61)
compute_declare_assumption # unused function (ltlf2dfa/ltlf2dfa.py:138)
error # unused variable (ltlf2dfa/ltlf2dfa.py:187)
_._pl_transformer # unused attribute (ltlf2dfa/parser/ltlf.py:53)
_.start # unused method (ltlf2dfa/parser/ltlf.py:55)
_.ltlf_formula # unused method (ltlf2dfa/parser/ltlf.py:60)
_.ltlf_equivalence # unused method (ltlf2dfa/parser/ltlf.py:65)
_.ltlf_implication # unused method (ltlf2dfa/parser/ltlf.py:75)
_.ltlf_or # unused method (ltlf2dfa/parser/ltlf.py:85)
_.ltlf_and # unused method (ltlf2dfa/parser/ltlf.py:95)
_.ltlf_until # unused method (ltlf2dfa/parser/ltlf.py:105)
_.ltlf_release # unused method (ltlf2dfa/parser/ltlf.py:115)
_.ltlf_always # unused method (ltlf2dfa/parser/ltlf.py:125)
_.ltlf_eventually # unused method (ltlf2dfa/parser/ltlf.py:135)
_.ltlf_next # unused method (ltlf2dfa/parser/ltlf.py:145)
_.ltlf_weak_next # unused method (ltlf2dfa/parser/ltlf.py:155)
_.ltlf_not # unused method (ltlf2dfa/parser/ltlf.py:165)
_.ltlf_wrapped # unused method (ltlf2dfa/parser/ltlf.py:175)
_.ltlf_atom # unused method (ltlf2dfa/parser/ltlf.py:185)
_.ltlf_true # unused method (ltlf2dfa/parser/ltlf.py:190)
_.ltlf_false # unused method (ltlf2dfa/parser/ltlf.py:194)
_.ltlf_last # unused method (ltlf2dfa/parser/ltlf.py:198)
_.ltlf_symbol # unused method (ltlf2dfa/parser/ltlf.py:205)
_.start # unused method (ltlf2dfa/parser/pl.py:42)
_.propositional_formula # unused method (ltlf2dfa/parser/pl.py:46)
_.prop_equivalence # unused method (ltlf2dfa/parser/pl.py:51)
_.prop_implication # unused method (ltlf2dfa/parser/pl.py:61)
_.prop_or # unused method (ltlf2dfa/parser/pl.py:71)
_.prop_and # unused method (ltlf2dfa/parser/pl.py:81)
_.prop_not # unused method (ltlf2dfa/parser/pl.py:91)
_.prop_wrapped # unused method (ltlf2dfa/parser/pl.py:101)
_.prop_atom # unused method (ltlf2dfa/parser/pl.py:111)
_.prop_true # unused method (ltlf2dfa/parser/pl.py:116)
_.prop_false # unused method (ltlf2dfa/parser/pl.py:121)
_.atom # unused method (ltlf2dfa/parser/pl.py:126)
PPLTLBefore # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLHistorically # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLOnce # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLStart # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLTrue # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLWeakBefore # unused import (ltlf2dfa/parser/ppltl.py:28)
PPLTLTransformer # unused class (ltlf2dfa/parser/ppltl.py:47)
_._pl_transformer # unused attribute (ltlf2dfa/parser/ppltl.py:53)
_.start # unused method (ltlf2dfa/parser/ppltl.py:55)
_.ppltl_formula # unused method (ltlf2dfa/parser/ppltl.py:60)
_.ppltl_equivalence # unused method (ltlf2dfa/parser/ppltl.py:65)
_.ppltl_implication # unused method (ltlf2dfa/parser/ppltl.py:75)
_.ppltl_or # unused method (ltlf2dfa/parser/ppltl.py:85)
_.ppltl_and # unused method (ltlf2dfa/parser/ppltl.py:95)
_.ppltl_since # unused method (ltlf2dfa/parser/ppltl.py:105)
_.ppltl_pastrelease # unused method (ltlf2dfa/parser/ppltl.py:115)
_.ppltl_historically # unused method (ltlf2dfa/parser/ppltl.py:125)
_.ppltl_once # unused method (ltlf2dfa/parser/ppltl.py:135)
_.ppltl_before # unused method (ltlf2dfa/parser/ppltl.py:145)
_.ppltl_weak_before # unused method (ltlf2dfa/parser/ppltl.py:155)
_.ppltl_not # unused method (ltlf2dfa/parser/ppltl.py:165)
_.ppltl_wrapped # unused method (ltlf2dfa/parser/ppltl.py:175)
_.ppltl_atom # unused method (ltlf2dfa/parser/ppltl.py:185)
_.ppltl_true # unused method (ltlf2dfa/parser/ppltl.py:190)
_.ppltl_false # unused method (ltlf2dfa/parser/ppltl.py:194)
_.ppltl_start # unused method (ltlf2dfa/parser/ppltl.py:198)
_.ppltl_symbol # unused method (ltlf2dfa/parser/ppltl.py:205)
PPLTLUnaryOperator # unused class (ltlf2dfa/ppltl.py:77)
PPLTLBinaryOperator # unused class (ltlf2dfa/ppltl.py:81)
PPLTLTrue # unused class (ltlf2dfa/ppltl.py:109)
PPLTLImplies # unused class (ltlf2dfa/ppltl.py:213)
PPLTLEquivalence # unused class (ltlf2dfa/ppltl.py:240)
EQUAL # unused variable (ltlf2dfa/symbols.py:33)
ALL_SYMBOLS # unused variable (ltlf2dfa/symbols.py:57)
| 4,471 | 56.333333 | 72 | py |
LTLf2DFA | LTLf2DFA-main/scripts/check_copyright.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""
This script checks that all the Python files of the repository have:
- (optional) the Python shebang;
- the encoding header;
- the copyright notice.
It is assumed the script is run from the repository root.
"""
import itertools
import re
import sys
from pathlib import Path
HEADER_REGEX = r"""(#!/usr/bin/env python3
)?# -\*- coding: utf-8 -\*-
#
# This file is part of ltlf2dfa\.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# \(at your option\) any later version\.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE\. See the
# GNU General Public License for more details\.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa\. If not, see <https://www\.gnu\.org/licenses/>\.
#
"""
def check_copyright(file: Path) -> bool:
"""
Given a file, check if the header stuff is in place.
    Return True if the file has the encoding header and the copyright notice,
optionally prefixed by the shebang. Return False otherwise.
:param file: the file to check.
:return True if the file is compliant with the checks, False otherwise.
"""
content = file.read_text()
header_regex = re.compile(HEADER_REGEX, re.MULTILINE)
return re.match(header_regex, content) is not None
def parse_args():
"""Parse arguments."""
import argparse # pylint: disable=import-outside-toplevel
parser = argparse.ArgumentParser("check_copyright_notice")
    parser.add_argument(
        "--directory", type=str, default=".", help="The path to the repository root."
    )
    return parser.parse_args()
if __name__ == "__main__":
python_files = set(
itertools.chain(
Path("ltlf2dfa").glob("**/*.py"),
Path("tests").glob("**/*.py"),
Path("scripts").glob("**/*.py"),
)
)
ignore_files = {Path("scripts/whitelist.py")}
bad_files = {
filepath
for filepath in python_files.difference(ignore_files)
if not check_copyright(filepath)
}
if len(bad_files) > 0:
print("The following files are not well formatted:")
print("\n".join(map(str, bad_files)))
sys.exit(1)
else:
print("OK!")
sys.exit(0)
| 3,175 | 29.538462 | 85 | py |
LTLf2DFA | LTLf2DFA-main/tests/test_ltlf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Test LTLf."""
import os
import lark
import pytest
from ltlf2dfa.ltlf import (
LTLfAlways,
LTLfAnd,
LTLfAtomic,
LTLfEquivalence,
LTLfEventually,
LTLfFalse,
LTLfImplies,
LTLfLast,
LTLfNext,
LTLfNot,
LTLfOr,
LTLfRelease,
LTLfTrue,
LTLfUntil,
LTLfWeakNext,
)
from ltlf2dfa.parser.ltlf import LTLfParser
# from .conftest import LTLfFixtures
from .parsing import ParsingCheck
# from ltlf2dfa.pl import PLAtomic, PLTrue, PLFalse, PLAnd, PLOr
def test_parser():
parser = LTLfParser()
a, b, c = [LTLfAtomic(c) for c in "abc"]
assert parser("!a | b <-> !(a & !b) <-> a->b") == LTLfEquivalence(
[
LTLfOr([LTLfNot(a), b]),
LTLfNot(LTLfAnd([a, LTLfNot(b)])),
LTLfImplies([a, b]),
]
)
assert parser("(X a) & (WX !b)") == LTLfAnd([LTLfNext(a), LTLfWeakNext(LTLfNot(b))])
assert parser("(F (a&b)) <-> !(G (!a | !b) )") == LTLfEquivalence(
[
LTLfEventually(LTLfAnd([a, b])),
LTLfNot(LTLfAlways(LTLfOr([LTLfNot(a), LTLfNot(b)]))),
]
)
assert parser("(a U b U c) <-> !(!a R !b R !c)") == LTLfEquivalence(
[
LTLfUntil([a, b, c]),
LTLfNot(LTLfRelease([LTLfNot(a), LTLfNot(b), LTLfNot(c)])),
]
)
assert parser("a & last") == LTLfAnd([a, LTLfLast()])
def test_names():
good = ["a", "b", "name", "complex_name", "proposition10"]
bad = ["Future", "X", "$", "", "40a", "niceName"]
for name in good:
str(LTLfAtomic(name)) == name
for name in bad:
with pytest.raises(ValueError):
str(LTLfAtomic(name)) == name
def test_nnf():
parser = LTLfParser()
a, b, c = [LTLfAtomic(c) for c in "abc"]
f = parser("!(a & !b)")
assert f.to_nnf() == LTLfOr([LTLfNot(a), b])
f = parser("!(!a | b)")
assert f.to_nnf() == LTLfAnd([a, LTLfNot(b)])
f = parser("!(a <-> b)")
assert f.to_nnf() == LTLfAnd([LTLfOr([LTLfNot(a), LTLfNot(b)]), LTLfOr([a, b])])
# Next and Weak Next
f = parser("!(X (a & b))")
assert f.to_nnf() == LTLfWeakNext(LTLfOr([LTLfNot(a), LTLfNot(b)]))
f = parser("!(WX (a & b))")
assert f.to_nnf() == LTLfNext(LTLfOr([LTLfNot(a), LTLfNot(b)]))
# Eventually and Always
f = parser("!(F (a | b))")
assert f.to_nnf() == LTLfAlways(LTLfAnd([LTLfNot(a), LTLfNot(b)])).to_nnf()
# Until and Release
f = parser("!(a U b)")
assert f.to_nnf() == LTLfRelease([LTLfNot(a), LTLfNot(b)])
f = parser("!(a R b)")
assert f.to_nnf() == LTLfUntil([LTLfNot(a), LTLfNot(b)])
f = parser("!(F (a | b))")
assert f.to_nnf() == LTLfAlways(LTLfAnd([LTLfNot(a), LTLfNot(b)])).to_nnf()
f = parser("!(G (a | b))")
assert f.to_nnf() == LTLfEventually(LTLfAnd([LTLfNot(a), LTLfNot(b)])).to_nnf()
def test_mona():
parser = LTLfParser()
a, b, c = [LTLfAtomic(c) for c in "abc"]
tt = LTLfTrue()
ff = LTLfFalse()
assert a.to_mona(v="0") == "(0 in A)"
assert b.to_mona(v="0") == "(0 in B)"
assert c.to_mona(v="0") == "(0 in C)"
assert tt.to_mona(v="0") == "true"
assert ff.to_mona(v="0") == "false"
f = parser("!(a & !b)")
assert f.to_mona(v="0") == "~(((0 in A) & ~((0 in B))))"
f = parser("!(!a | b)")
assert f.to_mona(v="0") == "~((~((0 in A)) | (0 in B)))"
f = parser("!(a <-> b)")
assert (
f.to_nnf().to_mona(v="0")
== "((~((0 in A)) | ~((0 in B))) & ((0 in A) | (0 in B)))"
)
f = parser("a & last")
assert (
f.to_mona(v="0")
== "((0 in A) & ((0 = max($)) | (ex1 v_1: v_1 in $ & v_1=1 & false)))"
)
# Next and Weak Next
f = parser("X(a & b)")
assert f.to_mona(v="0") == "(ex1 v_1: v_1 in $ & v_1=1 & ((v_1 in A) & (v_1 in B)))"
f = parser("WX(a)")
assert (
f.to_mona(v="0") == "((0 = max($)) | (ex1 v_1: v_1 in $ & v_1=1 & (v_1 in A)))"
)
# f = parser("F(b & WX false) -> F(a & (WX false | X(WX false)))")
# assert f.to_mona(v="0") == ""
f = parser("WX (a & b)")
assert (
f.to_mona(v="0")
== "((0 = max($)) | (ex1 v_1: v_1 in $ & v_1=1 & ((v_1 in A) & (v_1 in B))))"
)
# Until and Release
f = parser("a U b")
assert (
f.to_mona(v="0")
== "(ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & (v_1 in B) & (all1 v_2: v_2 in $ & 0<=v_2&v_2<v_1"
" => (v_2 in A)))"
)
f = parser("a R b")
assert (
f.to_mona(v="0")
== "((ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & (v_1 in A) & (all1 v_2: v_2 in $ & 0<=v_2&v_2<=v_1"
" => (v_2 in B))) | (all1 v_2: v_2 in $ & 0<=v_2&v_2<=max($) => (v_2 in B)))"
)
# Eventually and Always
f = parser("F(a & b)")
assert (
f.to_mona(v="0")
== "(ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & ((v_1 in A) & (v_1 in B)) & (all1 v_2: "
"v_2 in $ & 0<=v_2&v_2<v_1 => true))"
)
f = parser("G(a | b)")
assert (
f.to_mona(v="0")
== "((ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & false & (all1 v_2: v_2 in $ & 0<=v_2&v_2<=v_1 => "
"((v_2 in A) | (v_2 in B)))) | (all1 v_2: v_2 in $ & 0<=v_2&v_2<=max($) => ((v_2 in A) "
"| (v_2 in B))))"
)
# @pytest.fixture(scope="session", params=LTLfFixtures.ltlf_formulas)
# def ltlf_formula_automa_pair(request):
# formula_obj = parser(request.param)
# automaton = formula_obj.to_automaton()
# return formula_obj, automaton
#
#
# @pytest.fixture(scope="session", params=LTLfFixtures.ltlf_formulas)
# def ltlf_formula_nnf_pair(request):
# formula_obj = parser(request.param)
# nnf = formula_obj.to_nnf()
# return formula_obj, nnf
class TestParsingTree:
@classmethod
def setup_class(cls):
# Path to grammar
this_path = os.path.dirname(os.path.abspath(__file__))
grammar_path = "../ltlf2dfa/parser/ltlf.lark"
grammar_path = os.path.join(this_path, *grammar_path.split("/"))
cls.checker = ParsingCheck(grammar_path)
def test_propositional(self):
ok, err = self.checker.precedence_check("a & !b | c", list("|&a!bc"))
assert ok, err
ok, err = self.checker.precedence_check(
"!a&(b->c)", "&,!,a,(,),->,b,c".split(",")
)
assert ok, err
def test_unary(self):
ok, err = self.checker.precedence_check("X X a", list("XXa"))
assert ok, err
ok, err = self.checker.precedence_check(
"X(G faLse)", "X ( ) G faLse".split(" ")
)
assert ok, err
ok, err = self.checker.precedence_check("X G a", list("XGa"))
assert ok, err
ok, err = self.checker.precedence_check("GX a", list("GXa"))
assert ok, err
ok, err = self.checker.precedence_check(
"XGXFWX G prop0", "X G X F WX G prop0".split(" ")
)
assert ok, err
ok, err = self.checker.precedence_check(
"XXWX!(!WXGGG a)", "X X WX ! ( ) ! WX G G G a".split(" ")
)
assert ok, err
def test_bad_termination(self):
# Wrong termination or space
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!a&", list("!a&"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!&b", list("!&b"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("a|b|", list("a|b|"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("G", list("G"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("(a)(", list("(a)("))
with pytest.raises(lark.UnexpectedInput) as exc:
self.checker.precedence_check("aUa", list("aUa"))
with pytest.raises(lark.UnexpectedInput) as exc:
self.checker.precedence_check("Xa", list("Xa"))
def test_bad_names(self):
# Invalid names
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("G X G", list("GXG"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("Future", ["Future"])
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("X F", list("XF"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!X", list("!X"))
| 9,118 | 29.295681 | 106 | py |
LTLf2DFA | LTLf2DFA-main/tests/test_pl.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Test the Propositional Logic."""
import os
import lark
import pytest
from ltlf2dfa.parser.pl import PLParser
from ltlf2dfa.pl import (
PLAnd,
PLAtomic,
PLEquivalence,
PLFalse,
PLImplies,
PLNot,
PLOr,
PLTrue,
)
from .parsing import ParsingCheck
def test_parser():
parser = PLParser()
sa, sb, sc = "A", "B", "C"
a, b, c = PLAtomic(sa), PLAtomic(sb), PLAtomic(sc)
parsed_a = parser("A")
assert parsed_a == a
parsed_b = parser("B")
assert parsed_b == b
parsed_not_a = parser("~A")
assert parsed_not_a == PLNot(a)
a_and_b = parser("A & B")
true_a_and_b = PLAnd([a, b])
assert a_and_b == true_a_and_b
a_or_b = parser("A | B")
true_a_or_b = PLOr([a, b])
assert a_or_b == true_a_or_b
material_implication = parser("!A | B <-> !(A & !B) <-> A->B")
true_material_implication = PLEquivalence(
[PLOr([PLNot(a), b]), PLNot(PLAnd([a, PLNot(b)])), PLImplies([a, b])]
)
assert material_implication == true_material_implication
a_imply_b = parser("A -> B")
true_a_imply_b = PLImplies([a, b])
assert a_imply_b == true_a_imply_b
a_imply_b_imply_c = parser("A -> B -> C")
true_a_imply_b_imply_c = PLImplies([a, b, c])
assert a_imply_b_imply_c == true_a_imply_b_imply_c
true_a_and_false_and_true = PLAnd([a, PLFalse(), PLTrue()])
a_and_false_and_true = parser("A & false & true")
assert a_and_false_and_true == true_a_and_false_and_true
def test_negate():
sa, sb, sc = "A", "B", "c"
a, b, c = PLAtomic(sa), PLAtomic(sb), PLAtomic(sc)
a_and_b = PLAnd([a, b])
not_a_or_not_b = PLOr([PLNot(a), PLNot(b)])
assert a_and_b.negate() == not_a_or_not_b
a_and_b_and_c = PLAnd([a, b, c])
not_a_or_not_b_or_not_c = PLOr([PLNot(a), PLNot(b), PLNot(c)])
assert a_and_b_and_c.negate() == not_a_or_not_b_or_not_c
a_or_b = PLOr([a, b])
not_a_and_not_b = PLAnd([PLNot(a), PLNot(b)])
assert a_or_b.negate() == not_a_and_not_b
def test_nnf():
parser = PLParser()
sa, sb = "A", "B"
a, b = PLAtomic(sa), PLAtomic(sb)
not_a_and_b = parser("!(A&B)")
nnf_not_a_and_b = parser("!A | !B")
assert not_a_and_b.to_nnf() == nnf_not_a_and_b
assert nnf_not_a_and_b == nnf_not_a_and_b.to_nnf()
dup = parser("!(A | A)")
nnf_dup = dup.to_nnf()
assert nnf_dup == PLAnd([PLNot(a), PLNot(a)])
material_implication = parser("!A | B <-> !(A & !B) <-> A->B")
nnf_material_implication = parser(
"((!A | B) & (!A | B) & (!A | B)) | ((A & !B) & (A & !B) & (A & !B))"
)
nnf_m = material_implication.to_nnf()
assert nnf_m == nnf_material_implication.to_nnf()
def test_find_labels():
parser = PLParser()
# complete formula
f = "!A | B <-> !(A & !B) <-> A->B"
formula = parser(f)
assert formula.find_labels() == ["A", "B"]
# more than one character
f = "!A & (!AB & !A0)"
formula = parser(f)
assert formula.find_labels() == [c for c in ["A", "AB", "A0"]]
# another formula
f = "!A | B <-> !(C & !B) <-> C->A"
formula = parser(f)
assert formula.find_labels() == ["A", "B", "C"]
def test_find_atomics():
parser = PLParser()
sa, sb, sab, sa0 = "A", "B", "AB", "A0"
a, b, ab, a0 = PLAtomic(sa), PLAtomic(sb), PLAtomic(sab), PLAtomic(sa0)
# complete formula
f = "!A | B <-> !(A & !B) <-> A->B"
formula = parser(f)
assert formula.find_atomics() == {a, b}
# more than one character
f = "!A & (!AB & !A0)"
formula = parser(f)
assert formula.find_atomics() == {c for c in {a, ab, a0}}
def test_mona():
# parser = PLParser()
a, b, c = [PLAtomic(c) for c in "abc"]
true = PLTrue()
false = PLFalse()
assert a.to_mona(v="0") == "(0 in A)"
assert b.to_mona(v="0") == "(0 in B)"
assert c.to_mona(v="0") == "(0 in C)"
assert true.to_mona(v="0") == "true"
assert false.to_mona(v="0") == "false"
def test_names():
good = [
"A",
"b",
"Hello",
"PropZero",
"Prop0",
"this_is_fine_2",
'"This is also allowed!"',
PLParser()("A -> B"),
]
bad = ["!", "&", "Invalid:", "", '"', "="]
for name in good:
PLAtomic(name)
for name in bad:
with pytest.raises(ValueError):
PLAtomic(name)
class TestParsingTree:
"""
The parsing tree should give the right priority to the operators.
"""
@classmethod
def setup_class(cls):
# Path to grammar
this_path = os.path.dirname(os.path.abspath(__file__))
grammar_path = "../ltlf2dfa/parser/pl.lark"
grammar_path = os.path.join(this_path, *grammar_path.split("/"))
cls.checker = ParsingCheck(grammar_path)
def test_unary(self):
ok, err = self.checker.precedence_check("!a & b", list("&!ab"))
assert ok, err
ok, err = self.checker.precedence_check("a & !b", list("&a!b"))
assert ok, err
ok, err = self.checker.precedence_check("a | !b", list("|a!b"))
assert ok, err
def test_and_or(self):
ok, err = self.checker.precedence_check("a & b & c", list("&&abc"))
assert ok, err
ok, err = self.checker.precedence_check("a & b | c", list("|&abc"))
assert ok, err
ok, err = self.checker.precedence_check("a | b & c", list("|a&bc"))
assert ok, err
def test_implications(self):
ok, err = self.checker.precedence_check(
"a <-> b -> c3", "<->,a,->,b,c3".split(",")
)
assert ok, err
ok, err = self.checker.precedence_check(
"(a <-> b) -> c", "->,(,),<->,a,b,c".split(",")
)
assert ok, err
def test_misc(self):
ok, err = self.checker.precedence_check(
"!a&(b->c)", "&,!,a,(,),->,b,c".split(",")
)
assert ok, err
def test_bad_examples(self):
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!a&", list("!a&"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!&b", list("!&b"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("a|b|", list("a|b|"))
| 6,954 | 27.157895 | 77 | py |
LTLf2DFA | LTLf2DFA-main/tests/conftest.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the configurations for the tests."""
import inspect
import os
from pathlib import Path
TEST_ROOT_DIR = os.path.dirname(inspect.getfile(inspect.currentframe())) # type: ignore
ROOT_DIR = str(Path(TEST_ROOT_DIR, "..").resolve()) # type: ignore
| 978 | 36.653846 | 88 | py |
LTLf2DFA | LTLf2DFA-main/tests/test_pltlf.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Test PPLTL."""
import os
import lark
import pytest
from ltlf2dfa.parser.ppltl import PPLTLParser
from ltlf2dfa.ppltl import (
PPLTLAnd,
PPLTLAtomic,
PPLTLBefore,
PPLTLEquivalence,
PPLTLFalse,
PPLTLHistorically,
PPLTLImplies,
PPLTLNot,
PPLTLOnce,
PPLTLOr,
PPLTLPastRelease,
PPLTLSince,
PPLTLStart,
PPLTLTrue,
PPLTLWeakBefore,
)
# from .conftest import LTLfFixtures
from .parsing import ParsingCheck
def test_parser():
parser = PPLTLParser()
a, b, c = [PPLTLAtomic(c) for c in "abc"]
assert parser("!a | b <-> !(a & !b) <-> a->b") == PPLTLEquivalence(
[
PPLTLOr([PPLTLNot(a), b]),
PPLTLNot(PPLTLAnd([a, PPLTLNot(b)])),
PPLTLImplies([a, b]),
]
)
assert parser("(Y a)") == PPLTLBefore(a)
assert parser("a & O(b)") == PPLTLAnd([a, PPLTLOnce(b)])
assert parser("(O (a&b)) <-> !(H (!a | !b) )") == PPLTLEquivalence(
[
PPLTLOnce(PPLTLAnd([a, b])),
PPLTLNot(PPLTLHistorically(PPLTLOr([PPLTLNot(a), PPLTLNot(b)]))),
]
)
assert parser("(a S b S !c)") == PPLTLSince([a, b, PPLTLNot(c)])
assert parser("a & start") == PPLTLAnd([a, PPLTLStart()])
def test_negate():
parser = PPLTLParser()
sa, sb, sc = "a", "b", "c"
a, b, c = PPLTLAtomic(sa), PPLTLAtomic(sb), PPLTLAtomic(sc)
a_and_b = PPLTLAnd([a, b])
not_a_or_not_b = PPLTLOr([PPLTLNot(a), PPLTLNot(b)])
assert a_and_b.negate() == not_a_or_not_b
a_and_b_and_c = PPLTLAnd([a, b, c])
not_a_or_not_b_or_not_c = PPLTLOr([PPLTLNot(a), PPLTLNot(b), PPLTLNot(c)])
assert a_and_b_and_c.negate() == not_a_or_not_b_or_not_c
before_a = PPLTLBefore(a)
weakBefore_not_a = PPLTLWeakBefore(PPLTLNot(a))
assert before_a.negate() == weakBefore_not_a
once_a = PPLTLOnce(a)
false_pastRelease_not_a = PPLTLPastRelease([PPLTLFalse(), PPLTLNot(a)])
assert once_a.negate() == false_pastRelease_not_a
historically_a = PPLTLHistorically(a)
true_since_not_a = PPLTLSince([PPLTLTrue(), PPLTLNot(a)])
assert historically_a.negate() == true_since_not_a
def test_names():
good = ["a", "b", "name", "complex_name", "proposition10"]
bad = ["Future", "X", "$", "", "40a", "niceName"]
for name in good:
str(PPLTLAtomic(name)) == name
for name in bad:
with pytest.raises(ValueError):
str(PPLTLAtomic(name)) == name
def test_nnf():
parser = PPLTLParser()
a, b, c = [PPLTLAtomic(c) for c in "abc"]
f = parser("!(a & !b)")
assert f.to_nnf() == PPLTLOr([PPLTLNot(a), b])
f = parser("!(!a | b)")
assert f.to_nnf() == PPLTLAnd([a, PPLTLNot(b)])
f = parser("!(a <-> b)")
assert f.to_nnf() == PPLTLAnd(
[PPLTLOr([PPLTLNot(a), PPLTLNot(b)]), PPLTLOr([a, b])]
)
# Yesterday and Weak Yesterday
f = parser("!(Y (a & b))")
assert f.to_nnf() == PPLTLWeakBefore(PPLTLOr([PPLTLNot(a), PPLTLNot(b)]))
f = parser("!(WY (a & b))")
assert f.to_nnf() == PPLTLBefore(PPLTLOr([PPLTLNot(a), PPLTLNot(b)]))
# Once and Historically
f = parser("!(O (a | b))")
assert (
f.to_nnf() == PPLTLHistorically(PPLTLAnd([PPLTLNot(a), PPLTLNot(b)])).to_nnf()
)
# Since
f = parser("!(a S b)")
assert f.to_nnf() == PPLTLPastRelease([PPLTLNot(a), PPLTLNot(b)])
f = parser("!(a P b)")
assert f.to_nnf() == PPLTLSince([PPLTLNot(a), PPLTLNot(b)])
f = parser("!(O (a | b))")
assert (
f.to_nnf() == PPLTLHistorically(PPLTLAnd([PPLTLNot(a), PPLTLNot(b)])).to_nnf()
)
f = parser("!(H (a | b))")
assert f.to_nnf() == PPLTLOnce(PPLTLAnd([PPLTLNot(a), PPLTLNot(b)])).to_nnf()
def test_mona():
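    # Same MONA encoding as in the PL tests, but the past operators are
    # evaluated at v = "max($)" (the end of the trace in this encoding) and
    # expand into quantification over earlier positions, as the expected
    # strings for Y, S, O and H below show.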
parser = PPLTLParser()
a, b, c = [PPLTLAtomic(c) for c in "abc"]
tt = PPLTLTrue()
ff = PPLTLFalse()
assert a.to_mona(v="max($)") == "(max($) in A)"
assert b.to_mona(v="max($)") == "(max($) in B)"
assert c.to_mona(v="max($)") == "(max($) in C)"
assert tt.to_mona(v="max($)") == "true"
assert ff.to_mona(v="max($)") == "false"
f = parser("!(a & !b)")
assert f.to_mona(v="max($)") == "~(((max($) in A) & ~((max($) in B))))"
f = parser("!(!a | b)")
assert f.to_mona(v="max($)") == "~((~((max($) in A)) | (max($) in B)))"
f = parser("!(a <-> b)")
assert (
f.to_nnf().to_mona(v="max($)")
== "((~((max($) in A)) | ~((max($) in B))) & ((max($) in A) | (max($) in B)))"
)
# Before
f = parser("Y(a & b)")
assert (
f.to_mona(v="max($)")
== "(ex1 v_1: v_1 in $ & v_1=max($)-1 & max($)>0 & ((v_1 in A) & (v_1 in B)))"
)
# Since
f = parser("a S b")
assert (
f.to_mona(v="max($)")
== "(ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & (v_1 in B) & (all1 v_2: v_2 in $ & v_1<v_2&v_2<=max($)"
" => (v_2 in A)))"
)
# Once and Historically
f = parser("O(a & b)")
assert (
f.to_mona(v="max($)")
== "(ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & ((v_1 in A) & (v_1 in B)) & (all1 v_2: "
"v_2 in $ & v_1<v_2&v_2<=max($) => true))"
)
f = parser("a & O(b)")
assert (
f.to_mona(v="max($)")
== "((max($) in A) & (ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & (v_1 in B) & (all1 v_2: v_2 in $ & v_1<v_2&v_2<=max($) => "
"true)))"
)
f = parser("H(a | b)")
assert (
f.to_mona(v="max($)")
== "~((ex1 v_1: v_1 in $ & 0<=v_1&v_1<=max($) & ~(((v_1 in A) | (v_1 in B))) & (all1 v_2: "
"v_2 in $ & v_1<v_2&v_2<=max($) => true)))"
)
# @pytest.fixture(scope="session", params=LTLfFixtures.ltlf_formulas)
# def ltlf_formula_automa_pair(request):
# formula_obj = parser(request.param)
# automaton = formula_obj.to_automaton()
# return formula_obj, automaton
#
#
# @pytest.fixture(scope="session", params=LTLfFixtures.ltlf_formulas)
# def ltlf_formula_nnf_pair(request):
# formula_obj = parser(request.param)
# nnf = formula_obj.to_nnf()
# return formula_obj, nnf
class TestParsingTree:
@classmethod
def setup_class(cls):
# Path to grammar
this_path = os.path.dirname(os.path.abspath(__file__))
grammar_path = "../ltlf2dfa/parser/ppltl.lark"
grammar_path = os.path.join(this_path, *grammar_path.split("/"))
cls.checker = ParsingCheck(grammar_path)
def test_propositional(self):
ok, err = self.checker.precedence_check("a & !b | c", list("|&a!bc"))
assert ok, err
ok, err = self.checker.precedence_check(
"!a&(b->c)", "&,!,a,(,),->,b,c".split(",")
)
assert ok, err
def test_unary(self):
ok, err = self.checker.precedence_check("Y Y a", list("YYa"))
assert ok, err
ok, err = self.checker.precedence_check(
"Y(H faLse)", "Y ( ) H faLse".split(" ")
)
assert ok, err
ok, err = self.checker.precedence_check("Y H a", list("YHa"))
assert ok, err
ok, err = self.checker.precedence_check("HY a", list("HYa"))
assert ok, err
ok, err = self.checker.precedence_check(
"YHYO H prop0", "Y H Y O H prop0".split(" ")
)
assert ok, err
ok, err = self.checker.precedence_check(
"YY!(!HHH a)", "Y Y ! ( ) ! H H H a".split(" ")
)
assert ok, err
def test_bad_termination(self):
# Wrong termination or space
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!a&", list("!a&"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!&b", list("!&b"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("a|b|", list("a|b|"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("H", list("H"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("(a)(", list("(a)("))
with pytest.raises(lark.UnexpectedInput) as exc:
self.checker.precedence_check("aSa", list("aSa"))
with pytest.raises(lark.UnexpectedInput) as exc:
self.checker.precedence_check("Ya", list("Ya"))
def test_bad_names(self):
# Invalid names
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("H Y H", list("HYH"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("Past", ["Past"])
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("Y O", list("YO"))
with pytest.raises(lark.UnexpectedInput):
self.checker.precedence_check("!Y", list("!Y"))
| 9,453 | 29.496774 | 130 | py |
LTLf2DFA | LTLf2DFA-main/tests/test_misc.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Misc tests."""
import os
def test_ltlf_example_readme():
from ltlf2dfa.parser.ltlf import LTLfParser
parser = LTLfParser()
formula = "G(a -> X b)"
parsed_formula = parser(formula)
assert str(parsed_formula) == "G((a -> X(b)))"
assert parsed_formula.find_labels() == [c for c in "ab"]
# dfa = parsed_formula.to_dfa()
def test_ppltl_example_readme():
from ltlf2dfa.parser.ppltl import PPLTLParser
parser = PPLTLParser()
formula = "H(a -> Y b)"
parsed_formula = parser(formula)
assert str(parsed_formula) == "H((a -> Y(b)))"
assert parsed_formula.find_labels() == [c for c in "ab"]
# dfa = parsed_formula.to_dfa()
def test_hash_consistency_after_pickling():
import pickle # nosec
from ltlf2dfa.parser.ltlf import LTLfParser
parser = LTLfParser()
formula = "F (a & !b)"
old_obj = parser(formula)
h = hash(old_obj)
pickle.dump(old_obj, open("temp", "wb")) # nosec
new_obj = pickle.load(open("temp", "rb")) # nosec
assert new_obj._hash is None
assert h == hash(new_obj)
os.remove("temp")
def test_QuotedFormula():
from ltlf2dfa.base import QuotedFormula
from ltlf2dfa.ltlf import LTLfAnd, LTLfAtomic
from ltlf2dfa.parser.ltlf import LTLfParser
f = LTLfParser()("!(G a)")
qf = QuotedFormula(f)
atomf = LTLfAnd([LTLfAtomic(f), LTLfAtomic(f)])
assert qf.wrapped is f
dir_qf = dir(qf)
for member in dir(f):
assert member in dir_qf
assert hasattr(qf, member)
| 2,237 | 25.963855 | 70 | py |
LTLf2DFA | LTLf2DFA-main/tests/test_ltlf2dfa.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Test the ltlf2dfa tool."""
from sympy import And, Not, Or, Symbol, symbols
from ltlf2dfa.ltlf2dfa import simplify_guard, ter2symb
from ltlf2dfa.parser.ltlf import LTLfParser
from ltlf2dfa.parser.ppltl import PPLTLParser
def test_ltlf_dfa():
parser = LTLfParser()
f = parser("a")
dfa = f.to_dfa(mona_dfa_out=False)
mona_dfa = f.to_dfa(mona_dfa_out=True)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 3;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="~a"];
1 -> 3 [label="a"];
2 -> 2 [label="true"];
3 -> 3 [label="true"];
}"""
expected_mona = """DFA for formula with free variables: A
Initial state: 0
Accepting states: 3
Rejecting states: 0 1 2
Automaton has 4 states and 4 BDD-nodes
Transitions:
State 0: X -> state 1
State 1: 0 -> state 2
State 1: 1 -> state 3
State 2: X -> state 2
State 3: X -> state 3
A counter-example of least length (0) is:
A X
A = {}
A satisfying example of least length (1) is:
A X 1
A = {0}"""
assert dfa == expected
assert mona_dfa == expected_mona
f = parser("true")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 1;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="true"];
}"""
assert dfa == expected
f = parser("false")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle];
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="true"];
}"""
assert dfa == expected
f = parser("G a")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 1;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="~a"];
1 -> 1 [label="a"];
2 -> 2 [label="true"];
}"""
assert dfa == expected
f = parser("F(a & b)")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 2;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="~a | ~b"];
1 -> 2 [label="a & b"];
2 -> 2 [label="true"];
}"""
assert dfa == expected
f = parser("X(a)")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 4;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="true"];
2 -> 3 [label="~a"];
2 -> 4 [label="a"];
3 -> 3 [label="true"];
4 -> 4 [label="true"];
}"""
assert dfa == expected
f = parser("a U b")
dfa = f.to_dfa(mona_dfa_out=False)
expected1 = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 3;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="~a & ~b"];
1 -> 3 [label="b"];
1 -> 4 [label="a & ~b"];
2 -> 2 [label="true"];
3 -> 3 [label="true"];
4 -> 2 [label="~a & ~b"];
4 -> 3 [label="b"];
4 -> 4 [label="a & ~b"];
}"""
expected2 = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 3;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="~a & ~b"];
1 -> 3 [label="a & ~b"];
1 -> 4 [label="b"];
2 -> 2 [label="true"];
3 -> 2 [label="~a & ~b"];
3 -> 3 [label="a & ~b"];
3 -> 4 [label="b"];
4 -> 4 [label="true"];
}"""
    assert dfa == expected1 or dfa == expected2
f = parser("G(a) & F(b)")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 3;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 2 [label="~a"];
1 -> 1 [label="a & ~b"];
1 -> 3 [label="a & b"];
2 -> 2 [label="true"];
3 -> 2 [label="~a"];
3 -> 3 [label="a"];
}"""
assert dfa == expected
def test_ltlf_mona_dfa():
parser = LTLfParser()
f = parser("a")
mona_dfa = f.to_dfa(mona_dfa_out=True)
expected_mona = """DFA for formula with free variables: A
Initial state: 0
Accepting states: 3
Rejecting states: 0 1 2
Automaton has 4 states and 4 BDD-nodes
Transitions:
State 0: X -> state 1
State 1: 0 -> state 2
State 1: 1 -> state 3
State 2: X -> state 2
State 3: X -> state 3
A counter-example of least length (0) is:
A X
A = {}
A satisfying example of least length (1) is:
A X 1
A = {0}"""
assert mona_dfa == expected_mona
f = parser("true")
mona_dfa = f.to_dfa(mona_dfa_out=True)
expected_mona = """DFA for formula with free variables:
Initial state: 0
Accepting states: 1
Rejecting states: 0
Automaton has 2 states and 1 BDD-node
Transitions:
State 0: -> state 1
State 1: -> state 1
Formula is valid
A satisfying example of least length (0) is:"""
assert mona_dfa == expected_mona
f = parser("false")
mona_dfa = f.to_dfa(mona_dfa_out=True)
expected_mona = """DFA for formula with free variables:
Initial state: 0
Accepting states:
Rejecting states: 0
Automaton has 1 state and 1 BDD-node
Transitions:
State 0: -> state 0
Formula is unsatisfiable
A counter-example of least length (0) is:"""
assert mona_dfa == expected_mona
f = parser("G a")
mona_dfa = f.to_dfa(mona_dfa_out=True)
expected_mona = """DFA for formula with free variables: A
Initial state: 0
Accepting states: 1
Rejecting states: 0 2
Automaton has 3 states and 3 BDD-nodes
Transitions:
State 0: X -> state 1
State 1: 0 -> state 2
State 1: 1 -> state 1
State 2: X -> state 2
A counter-example of least length (1) is:
A X 0
A = {}
A satisfying example of least length (0) is:
A X
A = {}"""
assert mona_dfa == expected_mona
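    # WX is the weak next operator, definable as WX(phi) == !X(!phi); each of
    # the next two pairs therefore parses to logically equivalent formulas
    # that must compile to identical MONA DFAs.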
f1 = parser("F(WX(false))")
f2 = parser("F(!(X(!(false))))")
mona_dfa_1 = f1.to_dfa(mona_dfa_out=True)
mona_dfa_2 = f2.to_dfa(mona_dfa_out=True)
assert mona_dfa_1 == mona_dfa_2
f1 = parser("F(b & WX false) -> F(a & (WX false | X(WX false)))")
f2 = parser(
"((! (F (! ((! b) || (X (! false)))))) || (F (! ((! a) || (! ((! (X (! false))) || (X (! (X (! false))))))))))"
)
mona_dfa_1 = f1.to_dfa(mona_dfa_out=True)
mona_dfa_2 = f2.to_dfa(mona_dfa_out=True)
assert mona_dfa_1 == mona_dfa_2
def test_ppltl_dfa():
parser = PPLTLParser()
f = parser("a")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 2;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="~a"];
1 -> 2 [label="a"];
2 -> 1 [label="~a"];
2 -> 2 [label="a"];
}"""
assert dfa == expected
f = parser("true")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 1;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="true"];
}"""
assert dfa == expected
f = parser("O(a) -> O(b)")
dfa = f.to_dfa(mona_dfa_out=False)
expected = """digraph MONA_DFA {
rankdir = LR;
center = true;
size = "7.5,10.5";
edge [fontname = Courier];
node [height = .5, width = .5];
node [shape = doublecircle]; 1; 2;
node [shape = circle]; 1;
init [shape = plaintext, label = ""];
init -> 1;
1 -> 1 [label="~a & ~b"];
1 -> 2 [label="b"];
1 -> 3 [label="a & ~b"];
2 -> 2 [label="true"];
3 -> 3 [label="~b"];
3 -> 2 [label="b"];
}"""
assert dfa == expected
def test_ter2symb():
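    # ter2symb decodes a MONA ternary transition label into a sympy guard:
    # as the cases below show, '1' keeps the proposition, '0' negates it and
    # 'X' (don't care) drops it from the conjunction.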
ap = symbols("a b c")
tern_0 = "X"
actual = ter2symb(None, tern_0)
expected = And()
assert expected == actual
tern_1 = "0X"
actual = ter2symb(ap, tern_1)
expected = And(Not(Symbol("a")))
assert expected == actual
tern_2 = "10"
actual = ter2symb(ap, tern_2)
expected = And(Symbol("a"), Not(Symbol("b")))
assert expected == actual
tern_3 = "0X1"
actual = ter2symb(ap, tern_3)
expected = And(Not(Symbol("a")), Symbol("c"))
assert expected == actual
ap = symbols("a b c d e f g h i")
tern_4 = "0X110XX01"
actual = ter2symb(ap, tern_4)
expected = And(
Not(Symbol("a")),
Symbol("c"),
Symbol("d"),
Not(Symbol("e")),
Not(Symbol("h")),
Symbol("i"),
)
assert expected == actual
def test_simplify_guard():
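    # simplify_guard ORs the per-transition guards produced by ter2symb and
    # lets sympy reduce the disjunction; every case below collapses to a
    # plain OR of negated symbols.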
ap = symbols("a b c")
tern_1 = "0XX"
tern_2 = "10X"
tern_3 = "110"
sym_1 = ter2symb(ap, tern_1)
sym_2 = ter2symb(ap, tern_2)
sym_3 = ter2symb(ap, tern_3)
actual = simplify_guard([sym_1, sym_2, sym_3])
expected = Or(Not(Symbol("a")), Not(Symbol("b")), Not(Symbol("c")))
assert expected == actual
ap = symbols("a b c d e f g h")
tern_1 = "0XXXXXXX"
tern_2 = "10XXXXXX"
tern_3 = "110XXXXX"
tern_4 = "1110XXXX"
tern_5 = "11110XXX"
tern_6 = "111110XX"
tern_7 = "1111110X"
tern_8 = "11111110"
sym_1 = ter2symb(ap, tern_1)
sym_2 = ter2symb(ap, tern_2)
sym_3 = ter2symb(ap, tern_3)
sym_4 = ter2symb(ap, tern_4)
sym_5 = ter2symb(ap, tern_5)
sym_6 = ter2symb(ap, tern_6)
sym_7 = ter2symb(ap, tern_7)
sym_8 = ter2symb(ap, tern_8)
actual = simplify_guard([sym_1, sym_2, sym_3, sym_4, sym_5, sym_6, sym_7, sym_8])
expected = Or(
Not(Symbol("a")),
Not(Symbol("b")),
Not(Symbol("c")),
Not(Symbol("d")),
Not(Symbol("e")),
Not(Symbol("f")),
Not(Symbol("g")),
Not(Symbol("h")),
)
assert expected == actual
sym_1 = Not(Symbol("a"))
sym_2 = And(Symbol("a"), Not(Symbol("b")))
sym_3 = And(Symbol("a"), Symbol("b"), Not(Symbol("c")))
sym_4 = And(Symbol("a"), Symbol("b"), Symbol("c"), Not(Symbol("d")))
sym_5 = And(Symbol("a"), Symbol("b"), Symbol("c"), Symbol("d"), Not(Symbol("e")))
sym_6 = And(
Symbol("a"),
Symbol("b"),
Symbol("c"),
Symbol("d"),
Symbol("e"),
Not(Symbol("f")),
)
sym_7 = And(
Symbol("a"),
Symbol("b"),
Symbol("c"),
Symbol("d"),
Symbol("e"),
Symbol("f"),
Not(Symbol("g")),
)
sym_8 = And(
Symbol("a"),
Symbol("b"),
Symbol("c"),
Symbol("d"),
Symbol("e"),
Symbol("f"),
Symbol("g"),
Not(Symbol("h")),
)
actual = simplify_guard([sym_1, sym_2, sym_3, sym_4, sym_5, sym_6, sym_7, sym_8])
expected = Or(
Not(Symbol("a")),
Not(Symbol("b")),
Not(Symbol("c")),
Not(Symbol("d")),
Not(Symbol("e")),
Not(Symbol("f")),
Not(Symbol("g")),
Not(Symbol("h")),
)
assert expected == actual
| 12,544 | 23.454191 | 119 | py |
LTLf2DFA | LTLf2DFA-main/tests/parsing.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""Small helper class to check the correct parsing."""
from typing import List, Optional, Tuple
from lark import Lark
from lark.lexer import Token # type: ignore
class ParsingCheck:
def __init__(self, lark):
"""\
Constructor
:param lark: path of a lark grammar file.
"""
self.parser = Lark(open(lark), parser="lalr", debug=True)
def precedence_check(
self, formula: str, tokens: List[str], print_tree: bool = False
) -> Tuple[bool, Optional[str]]:
"""\
Parse the formula and check that operators have the expected
precedence.
:param formula: expression to parse.
:param tokens: a list of symbols (top-down, left-right) order in the
parsing tree.
:param print_tree: verbose parsing tree.
        :returns: a (bool, str or None) pair: False plus an error message if
            parsing did not respect the expected order, (True, None) otherwise.
"""
# Parse
tree = self.parser.parse(formula)
topdown_it = tree.iter_subtrees_topdown()
if print_tree:
print(tree.pretty())
# Navigate
token_i = 0
for elem in topdown_it:
for child in elem.children:
# Filter tokens and match
if isinstance(child, Token):
if child != tokens[token_i]:
err = "Next expected '{}', got '{}'".format(
tokens[token_i], str(child)
)
return False, err
token_i += 1
# Check length
if token_i != len(tokens):
return False, "The input was too short"
return True, None
| 2,420 | 30.441558 | 76 | py |
LTLf2DFA | LTLf2DFA-main/tests/__init__.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# This file is part of ltlf2dfa.
#
# ltlf2dfa is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ltlf2dfa is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ltlf2dfa. If not, see <https://www.gnu.org/licenses/>.
#
"""This module contains the tests for the ltlf2dfa tool."""
| 769 | 37.5 | 70 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/main.py | from __future__ import print_function, division
import argparse
import os
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
from torch.autograd import Variable
import torchvision.utils as vutils
import torch.nn.functional as F
import numpy as np
import time
from tensorboardX import SummaryWriter
from datasets import __datasets__
from models.HITNet import HITNet
from loss.total_loss import global_loss
from loss.propagation_loss import prop_loss
from utils import *
from torch.utils.data import DataLoader
import gc
import json
from datetime import datetime
from utils.saver import Saver
import pdb
cudnn.benchmark = True
parser = argparse.ArgumentParser(description='HITNet')
parser.add_argument('--maxdisp', type=int, default=192, help='maximum disparity')
parser.add_argument('--fea_c', type=int, nargs='+', default=[32, 24, 24, 16, 16], help='feature extraction channels')
parser.add_argument('--dataset', required=True, help='dataset name', choices=__datasets__.keys())
parser.add_argument('--datapath', required=True, help='data path')
parser.add_argument('--trainlist', required=True, help='training list')
parser.add_argument('--testlist', required=True, help='testing list')
parser.add_argument('--lr', type=float, default=0.001, help='base learning rate')
parser.add_argument('--batch_size', type=int, default=16, help='training batch size')
parser.add_argument('--test_batch_size', type=int, default=8, help='testing batch size')
parser.add_argument('--epochs', type=int, required=True, help='number of epochs to train')
parser.add_argument('--lrepochs', type=str, required=True, help='the epochs to decay lr: the downscale rate')
parser.add_argument('--ckpt_start_epoch', type=int, default=0, help='the epochs at which the program start saving ckpt')
parser.add_argument('--logdir', required=True, help='the directory to save logs and checkpoints')
parser.add_argument('--loadckpt', help='load the weights from a specific checkpoint')
parser.add_argument('--resume', type=str, help='continue training the model')
parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)')
parser.add_argument('--summary_freq', type=int, default=20, help='the frequency of saving summary')
parser.add_argument('--save_freq', type=int, default=1, help='the frequency of saving checkpoint')
# parse arguments, set seeds
args = parser.parse_args()
torch.manual_seed(args.seed)
torch.cuda.manual_seed(args.seed)
os.makedirs(args.logdir, exist_ok=True)
# create summary logger
saver = Saver(args)
print("creating new summary file")
logger = SummaryWriter(saver.experiment_dir)
logfilename = saver.experiment_dir + '/log.txt'
with open(logfilename, 'a') as log: # wrt running information to log
log.write('\n\n\n\n')
log.write('-------------------NEW RUN-------------------\n')
log.write(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
log.write('\n')
json.dump(args.__dict__, log, indent=2)
log.write('\n')
# dataset, dataloader
StereoDataset = __datasets__[args.dataset]
train_dataset = StereoDataset(args.datapath, args.trainlist, True)
test_dataset = StereoDataset(args.datapath, args.testlist, False)
TrainImgLoader = DataLoader(train_dataset, args.batch_size, shuffle=True, num_workers=8, drop_last=False)
TestImgLoader = DataLoader(test_dataset, args.test_batch_size, shuffle=False, num_workers=4, drop_last=False)
# model, optimizer
model = HITNet(args)
model = nn.DataParallel(model)
model.cuda()
optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.999))
# load parameters
start_epoch = 0
if args.resume:
print("loading the lastest model in logdir: {}".format(args.resume))
state_dict = torch.load(args.resume)
model.load_state_dict(state_dict['model'])
optimizer.load_state_dict(state_dict['optimizer'])
start_epoch = state_dict['epoch'] + 1
elif args.loadckpt:
# load the checkpoint file specified by args.loadckpt
print("loading model {}".format(args.loadckpt))
state_dict = torch.load(args.loadckpt)
model.load_state_dict(state_dict['model'])
print("start at epoch {}".format(start_epoch))
def train():
min_EPE = args.maxdisp
min_D1 = 1
min_Thres3 = 1
for epoch_idx in range(start_epoch, args.epochs):
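        # args.lrepochs is decoded inside adjust_learning_rate (imported from
        # utils, not shown here); the help string suggests it bundles the
        # decay epochs and the downscale rate into one string (something like
        # "e1,e2,...:rate"), but check that helper for the exact syntax.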
adjust_learning_rate(optimizer, epoch_idx, args.lr, args.lrepochs)
# training
for batch_idx, sample in enumerate(TrainImgLoader):
# if batch_idx == 2:
# break
global_step = len(TrainImgLoader) * epoch_idx + batch_idx
start_time = time.time()
do_summary = global_step % args.summary_freq == 0
loss, scalar_outputs, image_outputs = train_sample(sample, compute_metrics=do_summary)
if do_summary:
save_scalars(logger, 'train', scalar_outputs, global_step)
save_images(logger, 'train', image_outputs, global_step)
del scalar_outputs, image_outputs
print('Epoch {}/{}, Iter {}/{}, train loss = {}, time = {:.3f}'.format(epoch_idx, args.epochs,
batch_idx,
len(TrainImgLoader), loss,
time.time() - start_time))
with open(logfilename, 'a') as log:
log.write('Epoch {}/{}, Iter {}/{}, train loss = {}, time = {:.3f}\n'.format(epoch_idx, args.epochs,
batch_idx,
len(TrainImgLoader),
loss,
time.time() - start_time))
# saving checkpoints
if (epoch_idx + 1) % args.save_freq == 0 and epoch_idx >= args.ckpt_start_epoch:
checkpoint_data = {'epoch': epoch_idx, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint_data, "{}/checkpoint_{:0>6}.ckpt".format(saver.experiment_dir, epoch_idx))
gc.collect()
# testing
avg_test_scalars = AverageMeterDict()
for batch_idx, sample in enumerate(TestImgLoader):
# if batch_idx == 2:
# break
global_step = len(TestImgLoader) * epoch_idx + batch_idx
start_time = time.time()
do_summary = global_step % args.summary_freq == 0
loss, scalar_outputs, image_outputs = test_sample(sample, compute_metrics=do_summary)
if do_summary:
save_scalars(logger, 'test', scalar_outputs, global_step)
save_images(logger, 'test', image_outputs, global_step)
avg_test_scalars.update(scalar_outputs)
del scalar_outputs, image_outputs
print('Epoch {}/{}, Iter {}/{}, test loss = {}, time = {:3f}'.format(epoch_idx, args.epochs,
batch_idx,
len(TestImgLoader), loss,
time.time() - start_time))
with open(logfilename, 'a') as log:
log.write('Epoch {}/{}, Iter {}/{}, test loss = {}, time = {:.3f}\n'.format(epoch_idx, args.epochs,
batch_idx,
len(TestImgLoader),
loss,
time.time() - start_time))
avg_test_scalars = avg_test_scalars.mean()
if avg_test_scalars['EPE'][-1] < min_EPE:
min_EPE = avg_test_scalars['EPE'][-1]
minEPE_epoch = epoch_idx
checkpoint_data = {'epoch': epoch_idx, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint_data, "{}/bestEPE_checkpoint.ckpt".format(saver.experiment_dir))
if avg_test_scalars['D1'][-1] < min_D1:
min_D1 = avg_test_scalars['D1'][-1]
minD1_epoch = epoch_idx
checkpoint_data = {'epoch': epoch_idx, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint_data, "{}/bestD1_checkpoint.ckpt".format(saver.experiment_dir))
if avg_test_scalars['Thres3'][-1] < min_Thres3:
min_Thres3 = avg_test_scalars['Thres3'][-1]
minThres3_epoch = epoch_idx
checkpoint_data = {'epoch': epoch_idx, 'model': model.state_dict(), 'optimizer': optimizer.state_dict()}
torch.save(checkpoint_data, "{}/bestThres3_checkpoint.ckpt".format(saver.experiment_dir))
save_scalars(logger, 'fulltest', avg_test_scalars, len(TrainImgLoader) * (epoch_idx + 1))
print("avg_test_scalars", avg_test_scalars)
with open(logfilename, 'a') as log:
js = json.dumps(avg_test_scalars)
log.write(js)
log.write('\n')
gc.collect()
with open(logfilename, 'a') as log:
log.write('min_EPE: {}/{}; min_D1: {}/{}'.format(min_EPE, minEPE_epoch, min_D1, minD1_epoch))
# train one sample
def train_sample(sample, compute_metrics=False):
model.train()
imgL, imgR, disp_gt, dx_gt, dy_gt = sample['left'], sample['right'], sample['disparity'], sample['dx_gt'], sample['dy_gt']
imgL = imgL.cuda()
imgR = imgR.cuda()
disp_gt = disp_gt.cuda().unsqueeze(1)
dx_gt = dx_gt.cuda().unsqueeze(1)
dy_gt = dy_gt.cuda().unsqueeze(1)
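    # Supervise only pixels whose ground-truth disparity is positive and
    # below maxdisp; everything else is excluded from the losses and metrics.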
mask = (disp_gt < args.maxdisp) & (disp_gt > 0)
optimizer.zero_grad()
outputs = model(imgL, imgR)
init_cv_pyramid = outputs["init_cv_pyramid"]
prop_disp_pyramid = outputs["prop_disp_pyramid"]
dx_pyramid = outputs["dx_pyramid"]
dy_pyramid = outputs["dy_pyramid"]
w_pyramid = outputs["w_pyramid"]
loss = global_loss(init_cv_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid,
disp_gt, dx_gt, dy_gt, args.maxdisp)
scalar_outputs = {"weighted_loss_sum": loss}
image_outputs = {"disp_est": prop_disp_pyramid, "disp_gt": disp_gt, "imgL": imgL, "imgR": imgR, "dx_gt": dx_gt, "dy_gt": dy_gt,
"dx_pyramid": dx_pyramid, "dy_pyramid": dy_pyramid, "w_pyramid": w_pyramid,
}
# pdb.set_trace()
if compute_metrics:
with torch.no_grad():
image_outputs["errormap"] = [disp_error_image_func()(disp_est.squeeze(1), disp_gt.squeeze(1)) for disp_est in prop_disp_pyramid]
scalar_outputs["EPE"] = [EPE_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1)) for disp_est in prop_disp_pyramid]
scalar_outputs["D1"] = [D1_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1)) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres1"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 1.0) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres2"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 2.0) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres3"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 3.0) for disp_est in prop_disp_pyramid]
loss.backward()
optimizer.step()
return tensor2float(loss), tensor2float(scalar_outputs), image_outputs
# test one sample
@make_nograd_func
def test_sample(sample, compute_metrics=True):
model.eval()
imgL, imgR, disp_gt = sample['left'], sample['right'], sample['disparity']
imgL = imgL.cuda()
imgR = imgR.cuda()
disp_gt = disp_gt.cuda().unsqueeze(1)
outputs = model(imgL, imgR)
prop_disp_pyramid = outputs['prop_disp_pyramid']
mask = (disp_gt < args.maxdisp) & (disp_gt > 0)
loss = torch.mean((prop_loss(torch.abs(prop_disp_pyramid[0] - disp_gt)))[mask])
scalar_outputs = {"loss": loss}
image_outputs = {"disp_est": prop_disp_pyramid, "disp_gt": disp_gt, "imgL": imgL, "imgR": imgR}
scalar_outputs["D1"] = [D1_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1)) for disp_est in prop_disp_pyramid]
scalar_outputs["EPE"] = [EPE_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1)) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres1"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 1.0) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres2"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 2.0) for disp_est in prop_disp_pyramid]
scalar_outputs["Thres3"] = [Thres_metric(disp_est.squeeze(1), disp_gt.squeeze(1), mask.squeeze(1), 3.0) for disp_est in prop_disp_pyramid]
if compute_metrics:
image_outputs["errormap"] = [disp_error_image_func()(disp_est.squeeze(1), disp_gt.squeeze(1)) for disp_est in prop_disp_pyramid]
return tensor2float(loss), tensor2float(scalar_outputs), image_outputs
if __name__ == '__main__':
train()
| 13,595 | 50.5 | 150 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/tile_update.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .FE import BasicConv2d
from .tile_warping import TileWarping, TileWarping1
from .submodules import DispUpsampleBySlantedPlane, SlantDUpsampleBySlantedPlaneT4T4, SlantD2xUpsampleBySlantedPlaneT4T2
import pdb
from utils.write_pfm import write_pfm_tensor
class ResBlock(nn.Module):
"""
Residual Block without BN but with dilation
"""
def __init__(self, inplanes, out_planes, hid_planes, add_relu=True):
super(ResBlock, self).__init__()
self.add_relu = add_relu
self.conv1 = nn.Sequential(nn.Conv2d(inplanes, hid_planes, 3, 1, 1, 1),
nn.LeakyReLU(inplace=True, negative_slope=0.2))
self.conv2 = nn.Conv2d(hid_planes, out_planes, 3, 1, 1, 1)
if add_relu:
self.relu = nn.LeakyReLU(inplace=True, negative_slope=0.2)
def forward(self, x):
out = self.conv1(x)
out = self.conv2(out)
out += x
if self.add_relu:
out = self.relu(out)
return out
class TileUpdate(nn.Module):
"""
Tile Update for a single resolution
forward input: fea duo from current resolution, tile hypothesis from current and previous resolution
forward output: refined tile hypothesis and confidence (if available)
"""
def __init__(self, in_c, out_c, hid_c, resblk_num, args):
super(TileUpdate, self).__init__()
self.disp_upsample = SlantDUpsampleBySlantedPlaneT4T4(2)
self.tile_warping = TileWarping(args)
self.prop_warp0 = BasicConv2d(48, 16, 1, 1, 0, 1)
self.prop_warp1 = BasicConv2d(48, 16, 1, 1, 0, 1)
self.conv0 = BasicConv2d(in_c, hid_c, 1, 1, 0, 1)
resblks = nn.ModuleList()
for i in range(resblk_num):
resblks.append(ResBlock(hid_c, hid_c, hid_c))
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, 1, 1, 0, 1, bias=False)
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, fea_r, current_hypothesis, previous_hypothesis=None):
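        # Tile hypothesis layout used throughout this module: channel 0 is
        # the tile disparity d, channels 1-2 are the plane slants dx/dy, and
        # the remaining channels hold the learned tile descriptor; only the
        # (d, dx, dy) plane drives the local cost-volume warping.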
current_tile_local_cv = self.tile_warping(current_hypothesis[:, :3, :, :], fea_l, fea_r)
current_tile_local_cv = self.prop_warp0(current_tile_local_cv)
aug_current_tile_hypothesis = torch.cat([current_hypothesis, current_tile_local_cv], 1)
if previous_hypothesis is None:
aug_hypothesis_set = aug_current_tile_hypothesis
else:
previous_tile_d = previous_hypothesis[:, 0, :, :].unsqueeze(1) # multiply 2 when passing to slant upsampling
previous_tile_dx = previous_hypothesis[:, 1, :, :].unsqueeze(1) # h direction
previous_tile_dy = previous_hypothesis[:, 2, :, :].unsqueeze(1) # w direction
up_previous_tile_d = self.disp_upsample(previous_tile_d, previous_tile_dx, previous_tile_dy)
# pdb.set_trace()
up_previous_tile_dx_dy = self.upsample(previous_hypothesis[:, 1:3, :, :])
up_previous_tile_dscrpt = self.upsample(previous_hypothesis[:, 3:, :, :])
up_previous_tile_dx_dy_dscrpt = torch.cat([up_previous_tile_dx_dy, up_previous_tile_dscrpt], dim=1)
up_previous_tile_plane = torch.cat([up_previous_tile_d, up_previous_tile_dx_dy_dscrpt[:, :2, :, :]], 1)
up_previous_tile_local_cv = self.tile_warping(up_previous_tile_plane, fea_l, fea_r)
up_previous_tile_local_cv = self.prop_warp1(up_previous_tile_local_cv)
aug_up_previous_tile_hypothesis = torch.cat([up_previous_tile_d, up_previous_tile_dx_dy_dscrpt, up_previous_tile_local_cv], 1)
aug_hypothesis_set = torch.cat([aug_current_tile_hypothesis, aug_up_previous_tile_hypothesis], 1)
tile_hypothesis_update = self.conv0(aug_hypothesis_set)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
if previous_hypothesis is None:
refined_hypothesis = current_hypothesis + tile_hypothesis_update
# pdb.set_trace()
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone())
# pdb.set_trace()
return [refined_hypothesis]
else:
conf = tile_hypothesis_update[:, :2, :, :] # [:, 0, :, :] is for pre
previous_delta_hypothesis = tile_hypothesis_update[:, 2:18, :, :]
current_delta_hypothesis = tile_hypothesis_update[:, 18:34, :, :]
_, hypothesis_select_mask = torch.max(conf, dim=1, keepdim=True)
hypothesis_select_mask = hypothesis_select_mask.float()
# 1: current is larger, this mask is used to select current
inverse_hypothesis_select_mask = 1 - hypothesis_select_mask
# 1: previous is larger, this mask is used to select previous
update_current_hypothesis = current_hypothesis + current_delta_hypothesis
# tmp = F.relu(update_current_hypothesis[:, :1, :, :])
update_current_hypothesis[:, :1, :, :] = F.relu(update_current_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
update_previous_hypothesis = torch.cat([up_previous_tile_d, up_previous_tile_dx_dy_dscrpt], 1) + previous_delta_hypothesis
# tmp = F.relu(update_previous_hypothesis[:, :1, :, :])
update_previous_hypothesis[:, :1, :, :] = F.relu(update_previous_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
refined_hypothesis = hypothesis_select_mask * update_current_hypothesis + inverse_hypothesis_select_mask * update_previous_hypothesis
pre_conf = conf[:, :1, :, :]
cur_conf = conf[:, 1:2, :, :]
update_current_disp = update_current_hypothesis[:, :1, :, :]
update_previous_disp = update_previous_hypothesis[:, :1, :, :]
update_current_dx = update_current_hypothesis[:, 1:2, :, :]
update_previous_dx = update_previous_hypothesis[:, 1:2, :, :]
update_current_dy = update_current_hypothesis[:, 2:3, :, :]
update_previous_dy = update_previous_hypothesis[:, 2:3, :, :]
# pdb.set_trace()
return [
refined_hypothesis,
update_current_disp, update_previous_disp,
update_current_dx, update_previous_dx,
update_current_dy, update_previous_dy,
cur_conf, pre_conf,
]
class PostTileUpdateNoUp(nn.Module):
"""
No hyp upsampling, equal to pure refinement, for 1/4 res
"""
def __init__(self, in_c, out_c, hid_c, resblk_num, args):
super(PostTileUpdateNoUp, self).__init__()
self.conv0 = BasicConv2d(in_c, hid_c, 1, 1, 0, 1)
self.conv1 = BasicConv2d(hid_c, hid_c, 3, 1, 1, 1)
resblks = nn.ModuleList()
for i in range(resblk_num):
resblks.append(ResBlock(hid_c, hid_c, hid_c))
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, 3, 1, 1, 1, bias=False)
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, previous_hypothesis):
# pdb.set_trace()
guided_up_previous_tile_hypothesis = torch.cat([previous_hypothesis, fea_l], 1)
tile_hypothesis_update = self.conv0(guided_up_previous_tile_hypothesis)
tile_hypothesis_update = self.conv1(tile_hypothesis_update)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = previous_hypothesis + tile_hypothesis_update
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone())
# pdb.set_trace()
return refined_hypothesis
class PostTileUpdate(nn.Module):
"""
Post Tile Update for a single resolution: decrease tile size, e.g. upsampling tile hypothesis, and do tile warping
forward input: fea duo from the largest resolution, tile hypothesis from previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num, slant_disp_up, args):
super(PostTileUpdate, self).__init__()
self.disp_upsample = slant_disp_up
self.conv0 = BasicConv2d(in_c, hid_c, 1, 1, 0, 1)
self.conv1 = BasicConv2d(hid_c, hid_c, 3, 1, 1, 1)
resblks = nn.ModuleList()
for i in range(resblk_num):
resblks.append(ResBlock(hid_c, hid_c, hid_c))
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, 3, 1, 1, 1, bias=False)
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, previous_hypothesis):
previous_tile_d = previous_hypothesis[:, 0, :, :].unsqueeze(1)
previous_tile_dx = previous_hypothesis[:, 1, :, :].unsqueeze(1) # h direction
previous_tile_dy = previous_hypothesis[:, 2, :, :].unsqueeze(1) # w direction
up_previous_tile_d = self.disp_upsample(previous_tile_d, previous_tile_dx, previous_tile_dy)
up_previous_tile_dx_dy = self.upsample(previous_hypothesis[:, 1:3, :, :])
up_previous_tile_dscrpt = self.upsample(previous_hypothesis[:, 3:, :, :])
up_previous_tile_hypothesis = torch.cat([up_previous_tile_d, up_previous_tile_dx_dy, up_previous_tile_dscrpt], 1)
# pdb.set_trace()
guided_up_previous_tile_hypothesis = torch.cat([up_previous_tile_hypothesis, fea_l], 1)
tile_hypothesis_update = self.conv0(guided_up_previous_tile_hypothesis)
tile_hypothesis_update = self.conv1(tile_hypothesis_update)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
refined_hypothesis = up_previous_tile_hypothesis + tile_hypothesis_update
# tmp = F.relu(refined_hypothesis[:, :1, :, :])
refined_hypothesis[:, :1, :, :] = F.relu(refined_hypothesis[:, :1, :, :].clone()) # Force disp to be positive
# pdb.set_trace()
return refined_hypothesis
class FinalTileUpdate(nn.Module):
"""
Final Tile Update: only predicts disp
forward input: fea duo from the largest resolution, tile hypothesis from previous resolution
forward output: refined tile hypothesis
"""
def __init__(self, in_c, out_c, hid_c, resblk_num, slant_disp_up, args):
super(FinalTileUpdate, self).__init__()
self.disp_upsample = slant_disp_up
self.conv0 = BasicConv2d(in_c, hid_c, 1, 1, 0, 1)
self.conv1 = BasicConv2d(hid_c, hid_c, 3, 1, 1, 1)
resblks = nn.ModuleList()
for i in range(resblk_num):
resblks.append(ResBlock(hid_c, hid_c, hid_c))
self.resblocks = nn.Sequential(*resblks)
self.lastconv = nn.Conv2d(hid_c, out_c, 3, 1, 1, 1, bias=False)
self.upsample = nn.UpsamplingNearest2d(scale_factor=2)
# For final disparity and each supervision signal to be positive
self.relu = nn.ReLU()
def forward(self, fea_l, previous_hypothesis):
previous_tile_d = previous_hypothesis[:, 0, :, :].unsqueeze(1)
previous_tile_dx = previous_hypothesis[:, 1, :, :].unsqueeze(1) # h direction
previous_tile_dy = previous_hypothesis[:, 2, :, :].unsqueeze(1) # w direction
up_previous_tile_d = self.disp_upsample(previous_tile_d, previous_tile_dx, previous_tile_dy)
up_previous_tile_dx_dy = self.upsample(previous_hypothesis[:, 1:3, :, :])
up_previous_tile_dscrpt = self.upsample(previous_hypothesis[:, 3:, :, :])
up_previous_tile_hypothesis = torch.cat([up_previous_tile_d, up_previous_tile_dx_dy, up_previous_tile_dscrpt], 1)
# pdb.set_trace()
guided_up_previous_tile_hypothesis = torch.cat([up_previous_tile_hypothesis, fea_l], 1)
tile_hypothesis_update = self.conv0(guided_up_previous_tile_hypothesis)
tile_hypothesis_update = self.conv1(tile_hypothesis_update)
tile_hypothesis_update = self.resblocks(tile_hypothesis_update)
tile_hypothesis_update = self.lastconv(tile_hypothesis_update)
        refined_hypothesis = up_previous_tile_d + tile_hypothesis_update
refined_hypothesis = F.relu(refined_hypothesis.clone()) # Force disp to be positive
# pdb.set_trace()
return refined_hypothesis
| 12,738 | 53.67382 | 145 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/FE.py | import torch.nn.functional as F
import torch
import torch.nn as nn
class feature_extraction_conv(nn.Module):
"""
UNet for HITNet
"""
def __init__(self, args):
super(feature_extraction_conv, self).__init__()
self.conv1x_0 = nn.Sequential(
BasicConv2d(3, 16, 3, 1, 1, 1),
BasicConv2d(16, 16, 3, 1, 1, 1),
)
self.conv2x_0 = nn.Sequential(
BasicConv2d(16, 16, 2, 2, 0, 1),
BasicConv2d(16, 16, 3, 1, 1, 1),
BasicConv2d(16, 16, 3, 1, 1, 1)
)
self.conv4x_0 = nn.Sequential(
BasicConv2d(16, 24, 2, 2, 0, 1),
BasicConv2d(24, 24, 3, 1, 1, 1),
BasicConv2d(24, 24, 3, 1, 1, 1)
)
self.conv8x_0 = nn.Sequential(
BasicConv2d(24, 24, 2, 2, 0, 1),
BasicConv2d(24, 24, 3, 1, 1, 1),
BasicConv2d(24, 24, 3, 1, 1, 1)
)
self.conv16x_0 = nn.Sequential(
BasicConv2d(24, 32, 2, 2, 0, 1),
BasicConv2d(32, 32, 3, 1, 1, 1),
BasicConv2d(32, 32, 3, 1, 1, 1)
)
self.conv16_8x_0 = unetUp(32, 24, 24)
self.conv8_4x_0 = unetUp(24, 24, 24)
self.conv4_2x_0 = unetUp(24, 16, 16)
self.conv2_1x_0 = unetUp(16, 16, 16)
self.last_conv_1x = nn.Conv2d(16, 16, 1, 1, 0, 1, bias=False)
self.last_conv_2x = nn.Conv2d(16, 16, 1, 1, 0, 1, bias=False)
self.last_conv_4x = nn.Conv2d(24, 24, 1, 1, 0, 1, bias=False)
self.last_conv_8x = nn.Conv2d(24, 24, 1, 1, 0, 1, bias=False)
self.last_conv_16x = nn.Conv2d(32, 32, 1, 1, 0, 1, bias=False)
def forward(self, x):
layer1x_0 = self.conv1x_0(x)
layer2x_0 = self.conv2x_0(layer1x_0)
layer4x_0 = self.conv4x_0(layer2x_0)
layer8x_0 = self.conv8x_0(layer4x_0)
layer16x_0 = self.conv16x_0(layer8x_0)
layer8x_1 = self.conv16_8x_0(layer16x_0, layer8x_0)
layer4x_1 = self.conv8_4x_0(layer8x_1, layer4x_0)
layer2x_1 = self.conv4_2x_0(layer4x_1, layer2x_0)
layer1x_1 = self.conv2_1x_0(layer2x_1, layer1x_0)
layer16x_1 = self.last_conv_16x(layer16x_0)
layer8x_2 = self.last_conv_8x(layer8x_1)
layer4x_2 = self.last_conv_4x(layer4x_1)
layer2x_2 = self.last_conv_2x(layer2x_1)
layer1x_2 = self.last_conv_1x(layer1x_1)
return [layer16x_1, layer8x_2, layer4x_2, layer2x_2, layer1x_2] # 1/16, 1/8, 1/4, 1/2, 1/1
def BasicConv2d(in_channels, out_channels, kernel_size, stride, pad, dilation):
return nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size, stride=stride,
padding=dilation if dilation > 1 else pad, dilation=dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(inplace=True, negative_slope=0.2),
)
def BasicTransposeConv2d(in_channels, out_channels, kernel_size, stride, pad, dilation):
output_pad = stride + 2 * pad - kernel_size * dilation + dilation - 1
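    # For ConvTranspose2d, out = (in - 1)*stride - 2*pad
    #                            + dilation*(kernel_size - 1) + output_pad + 1,
    # so this choice of output_pad gives out = in * stride exactly, i.e. a
    # clean 2x upsampling for the (kernel=2, stride=2, pad=0) use below.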
return nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channels, kernel_size, stride, pad, output_pad, dilation, bias=False),
nn.BatchNorm2d(out_channels),
nn.LeakyReLU(inplace=True, negative_slope=0.2),
)
class unetUp(nn.Module):
def __init__(self, in_c1, in_c2, out_c):
super(unetUp, self).__init__()
self.up_conv1 = BasicTransposeConv2d(in_c1, in_c1//2, 2, 2, 0, 1)
self.reduce_conv2 = BasicConv2d(in_c1//2+in_c2, out_c, 1, 1, 0, 1)
self.conv = nn.Sequential(
BasicConv2d(out_c, out_c, 3, 1, 1, 1),
)
def forward(self, inputs1, inputs2): # small scale, large scale
layer1 = self.up_conv1(inputs1)
layer2 = self.reduce_conv2(torch.cat([layer1, inputs2], 1))
output = self.conv(layer2)
return output
| 3,853 | 36.784314 | 114 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/HITNet.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .FE import feature_extraction_conv
from .initialization import INIT
from .tile_warping import TileWarping
from .tile_update import TileUpdate, PostTileUpdate, FinalTileUpdate, PostTileUpdateNoUp
from models.submodules import DispUpsampleBySlantedPlane, SlantDUpsampleBySlantedPlaneT4T4, SlantD2xUpsampleBySlantedPlaneT4T2
import pdb
from utils.write_pfm import write_pfm_tensor
class HITNet(nn.Module):
def __init__(self, args):
super().__init__()
self.feature_extractor = feature_extraction_conv(args)
self.tile_init = INIT(args)
self.tile_warp = TileWarping(args)
self.tile_update0 = TileUpdate(32, 16, 32, 2, args) # 1/16 tile refine
self.tile_update1 = TileUpdate(64, 34, 32, 2, args) # 1/8 tile refine
self.tile_update2 = TileUpdate(64, 34, 32, 2, args) # 1/4 tile refine
self.tile_update3 = TileUpdate(64, 34, 32, 2, args) # 1/2 tile refine
self.tile_update4 = TileUpdate(64, 34, 32, 2, args) # 1/1 tile refine
self.tile_update4_1 = PostTileUpdateNoUp(40, 16, 32, 4, args) # 1/1 tile refine
self.tile_update5 = PostTileUpdate(32, 16, 32, 4, SlantD2xUpsampleBySlantedPlaneT4T2(), args) # 2/1 tile refine tile_size=2
self.tile_update6 = FinalTileUpdate(32, 1, 16, 2, DispUpsampleBySlantedPlane(2, 2), args) # 2/1 tile refine tile_size=1
# For training phase, we need to upsample disps using slant equation
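        # DispUpsampleBySlantedPlane lives in models.submodules (not shown
        # here); it is assumed to expand each tile with the plane equation
        # d(x, y) ~ d_tile + dx*dx_offset + dy*dy_offset, matching how the
        # (d, dx, dy) channels are sliced and passed below.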
self.prop_disp_upsample64x = DispUpsampleBySlantedPlane(64)
self.prop_disp_upsample32x = DispUpsampleBySlantedPlane(32)
self.prop_disp_upsample16x = DispUpsampleBySlantedPlane(16)
self.prop_disp_upsample8x = DispUpsampleBySlantedPlane(8)
self.prop_disp_upsample4x = DispUpsampleBySlantedPlane(4)
self.prop_disp_upsample2x = DispUpsampleBySlantedPlane(2, 2)
# For training phase, we need to upsample dx and dy using nearest interpolation
self.dxdy_upsample64x = nn.UpsamplingNearest2d(scale_factor=64)
self.dxdy_upsample32x = nn.UpsamplingNearest2d(scale_factor=32)
self.dxdy_upsample16x = nn.UpsamplingNearest2d(scale_factor=16)
self.dxdy_upsample8x = nn.UpsamplingNearest2d(scale_factor=8)
self.dxdy_upsample4x = nn.UpsamplingNearest2d(scale_factor=4)
self.dxdy_upsample2x = nn.UpsamplingNearest2d(scale_factor=2)
# For final disparity and each supervision signal to be positive
# self.relu = nn.ReLU(inplace=True)
def forward(self, left_img, right_img):
left_fea_pyramid = self.feature_extractor(left_img)
right_fea_pyramid = self.feature_extractor(right_img)
init_cv_pyramid, init_tile_pyramid = self.tile_init(left_fea_pyramid, right_fea_pyramid)
refined_tile16x = self.tile_update0(left_fea_pyramid[0], right_fea_pyramid[0], init_tile_pyramid[0])[0]
tile_update8x = self.tile_update1(left_fea_pyramid[1], right_fea_pyramid[1], init_tile_pyramid[1], refined_tile16x)
tile_update4x = self.tile_update2(left_fea_pyramid[2], right_fea_pyramid[2], init_tile_pyramid[2], tile_update8x[0])
tile_update2x = self.tile_update3(left_fea_pyramid[3], right_fea_pyramid[3], init_tile_pyramid[3], tile_update4x[0])
tile_update1x = self.tile_update4(left_fea_pyramid[4], right_fea_pyramid[4], init_tile_pyramid[4], tile_update2x[0])
refined_tile1x = self.tile_update4_1(left_fea_pyramid[2], tile_update1x[0])
refined_tile05x = self.tile_update5(left_fea_pyramid[3], refined_tile1x)
refined_tile025x = self.tile_update6(left_fea_pyramid[4], refined_tile05x)
final_disp = refined_tile025x
# pdb.set_trace()
if self.training:
prop_disp16_fx = self.prop_disp_upsample64x(refined_tile16x[:, :1, :, :], refined_tile16x[:, 1:2, :, :], refined_tile16x[:, 2:3, :, :])
prop_disp8_fx_cur = self.prop_disp_upsample32x(tile_update8x[1], tile_update8x[3], tile_update8x[5])
prop_disp8_fx_pre = self.prop_disp_upsample32x(tile_update8x[2], tile_update8x[4], tile_update8x[6])
prop_disp4_fx_cur = self.prop_disp_upsample16x(tile_update4x[1], tile_update4x[3], tile_update4x[5])
prop_disp4_fx_pre = self.prop_disp_upsample16x(tile_update4x[2], tile_update4x[4], tile_update4x[6])
prop_disp2_fx_cur = self.prop_disp_upsample8x(tile_update2x[1], tile_update2x[3], tile_update2x[5])
prop_disp2_fx_pre = self.prop_disp_upsample8x(tile_update2x[2], tile_update2x[4], tile_update2x[6])
prop_disp1_fx_cur = self.prop_disp_upsample4x(tile_update1x[1], tile_update1x[3], tile_update1x[5])
prop_disp1_fx_pre = self.prop_disp_upsample4x(tile_update1x[2], tile_update1x[4], tile_update1x[6])
prop_disp1_fx = self.prop_disp_upsample4x(refined_tile1x[:, :1, :, :], refined_tile1x[:, 1:2, :, :], refined_tile1x[:, 2:3, :, :])
prop_disp05_fx = self.prop_disp_upsample2x(refined_tile05x[:, :1, :, :], refined_tile05x[:, 1:2, :, :], refined_tile05x[:, 2:3, :, :])
prop_disp_pyramid = [
prop_disp16_fx,
prop_disp8_fx_cur,
prop_disp8_fx_pre,
prop_disp4_fx_cur,
prop_disp4_fx_pre,
prop_disp2_fx_cur,
prop_disp2_fx_pre,
prop_disp1_fx_cur,
prop_disp1_fx_pre,
prop_disp1_fx,
prop_disp05_fx,
final_disp
]
# WARNING: EACH PYRAMID MUST ALIGN ACCORDING TO PRE-CUR ORDER AND RESOLUTION ORDER SINCE SUPERVISION WOULDN'T SEE THE ORDER
dx16_fx = self.dxdy_upsample64x(refined_tile16x[:, 1:2, :, :])
dx8_fx_cur = self.dxdy_upsample32x(tile_update8x[3])
dx8_fx_pre = self.dxdy_upsample32x(tile_update8x[4])
dx4_fx_cur = self.dxdy_upsample16x(tile_update4x[3])
dx4_fx_pre = self.dxdy_upsample16x(tile_update4x[4])
dx2_fx_cur = self.dxdy_upsample8x(tile_update2x[3])
dx2_fx_pre = self.dxdy_upsample8x(tile_update2x[4])
dx1_fx_cur = self.dxdy_upsample4x(tile_update1x[3])
dx1_fx_pre = self.dxdy_upsample4x(tile_update1x[4])
dx1_fx = self.dxdy_upsample4x(refined_tile1x[:, 1:2, :, :])
dx05_fx = self.dxdy_upsample2x(refined_tile05x[:, 1:2, :, :])
dx_pyramid = [
dx16_fx,
dx8_fx_cur,
dx8_fx_pre,
dx4_fx_cur,
dx4_fx_pre,
dx2_fx_cur,
dx2_fx_pre,
dx1_fx_cur,
dx1_fx_pre,
dx1_fx,
dx05_fx,
]
dy16_fx = self.dxdy_upsample64x(refined_tile16x[:, 2:3, :, :])
dy8_fx_cur = self.dxdy_upsample32x(tile_update8x[5])
dy8_fx_pre = self.dxdy_upsample32x(tile_update8x[6])
dy4_fx_cur = self.dxdy_upsample16x(tile_update4x[5])
dy4_fx_pre = self.dxdy_upsample16x(tile_update4x[6])
dy2_fx_cur = self.dxdy_upsample8x(tile_update2x[5])
dy2_fx_pre = self.dxdy_upsample8x(tile_update2x[6])
dy1_fx_cur = self.dxdy_upsample4x(tile_update1x[5])
dy1_fx_pre = self.dxdy_upsample4x(tile_update1x[6])
dy1_fx = self.dxdy_upsample4x(refined_tile1x[:, 2:3, :, :])
dy05_fx = self.dxdy_upsample2x(refined_tile05x[:, 2:3, :, :])
dy_pyramid = [
dy16_fx,
dy8_fx_cur,
dy8_fx_pre,
dy4_fx_cur,
dy4_fx_pre,
dy2_fx_cur,
dy2_fx_pre,
dy1_fx_cur,
dy1_fx_pre,
dy1_fx,
dy05_fx,
]
conf8_fx_cur = self.dxdy_upsample32x(tile_update8x[7])
conf8_fx_pre = self.dxdy_upsample32x(tile_update8x[8])
conf4_fx_cur = self.dxdy_upsample16x(tile_update4x[7])
conf4_fx_pre = self.dxdy_upsample16x(tile_update4x[8])
conf2_fx_cur = self.dxdy_upsample8x(tile_update2x[7])
conf2_fx_pre = self.dxdy_upsample8x(tile_update2x[8])
conf1_fx_cur = self.dxdy_upsample4x(tile_update1x[7])
conf1_fx_pre = self.dxdy_upsample4x(tile_update1x[8])
w_pyramid = [
conf8_fx_cur,
conf8_fx_pre,
conf4_fx_cur,
conf4_fx_pre,
conf2_fx_cur,
conf2_fx_pre,
conf1_fx_cur,
conf1_fx_pre,
]
outputs = {
"init_cv_pyramid": init_cv_pyramid,
"prop_disp_pyramid": prop_disp_pyramid,
"dx_pyramid": dx_pyramid,
"dy_pyramid": dy_pyramid,
"w_pyramid": w_pyramid,
}
# pdb.set_trace()
return outputs
else:
prop_disp_pyramid = [final_disp]
return {
"prop_disp_pyramid": prop_disp_pyramid,
}
# pdb.set_trace()
| 9,149 | 51.285714 | 147 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/initialization.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from .FE import BasicConv2d
import pdb
from .submodules import BuildVolume2d
class INIT(nn.Module):
"""
Tile hypothesis initialization
input: dual feature pyramid
output: initial tile hypothesis pyramid
"""
def __init__(self, args):
super().__init__()
self.maxdisp = args.maxdisp
fea_c1x = args.fea_c[4]
fea_c2x = args.fea_c[3]
fea_c4x = args.fea_c[2]
fea_c8x = args.fea_c[1]
fea_c16x = args.fea_c[0]
self.tile_conv1x = nn.Sequential(
BasicConv2d(fea_c1x, fea_c1x, 4, 4, 0, 1),
nn.Conv2d(fea_c1x, fea_c1x, 1, 1, 0, bias=False)
)
self.tile_conv2x = nn.Sequential(
BasicConv2d(fea_c2x, fea_c2x, 4, 4, 0, 1),
nn.Conv2d(fea_c2x, fea_c2x, 1, 1, 0, bias=False)
)
self.tile_conv4x = nn.Sequential(
BasicConv2d(fea_c4x, fea_c4x, 4, 4, 0, 1),
nn.Conv2d(fea_c4x, fea_c4x, 1, 1, 0, bias=False)
)
self.tile_conv8x = nn.Sequential(
BasicConv2d(fea_c8x, fea_c8x, 4, 4, 0, 1),
nn.Conv2d(fea_c8x, fea_c8x, 1, 1, 0, bias=False)
)
self.tile_conv16x = nn.Sequential(
BasicConv2d(fea_c16x, fea_c16x, 4, 4, 0, 1),
nn.Conv2d(fea_c16x, fea_c16x, 1, 1, 0, bias=False)
)
self.tile_fea_dscrpt16x = BasicConv2d(fea_c16x+1, 13, 1, 1, 0, 1)
self.tile_fea_dscrpt8x = BasicConv2d(fea_c8x+1, 13, 1, 1, 0, 1)
self.tile_fea_dscrpt4x = BasicConv2d(fea_c4x+1, 13, 1, 1, 0, 1)
self.tile_fea_dscrpt2x = BasicConv2d(fea_c2x+1, 13, 1, 1, 0, 1)
self.tile_fea_dscrpt1x = BasicConv2d(fea_c1x+1, 13, 1, 1, 0, 1)
self._build_volume_2d16x = BuildVolume2d(self.maxdisp//16)
self._build_volume_2d8x = BuildVolume2d(self.maxdisp//8)
self._build_volume_2d4x = BuildVolume2d(self.maxdisp//4)
self._build_volume_2d2x = BuildVolume2d(self.maxdisp//2)
self._build_volume_2d1x = BuildVolume2d(self.maxdisp)
def tile_features(self, fea_l, fea_r):
right_fea_pad = [0, 3, 0, 0]
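        # Added note (not in the original source): left tiles are extracted with
        # stride (4, 4), while for the right image the horizontal stride is
        # temporarily switched to 1 (after padding 3 columns), so right tile
        # features exist at every column and BuildVolume2d can later evaluate
        # matching costs at integer per-pixel disparities by slicing those
        # columns with step 4.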
# pdb.set_trace()
tile_fea_l1x = self.tile_conv1x(fea_l[-1])
padded_fea_r1x = F.pad(fea_r[-1], right_fea_pad)
self.tile_conv1x[0][0].stride = (4, 1)
tile_fea_r1x = self.tile_conv1x(padded_fea_r1x)
self.tile_conv1x[0][0].stride = (4, 4)
tile_fea_l2x = self.tile_conv2x(fea_l[-2])
padded_fea_r2x = F.pad(fea_r[-2], right_fea_pad)
self.tile_conv2x[0][0].stride = (4, 1)
tile_fea_r2x = self.tile_conv2x(padded_fea_r2x)
self.tile_conv2x[0][0].stride = (4, 4)
tile_fea_l4x = self.tile_conv4x(fea_l[-3])
padded_fea_r4x = F.pad(fea_r[-3], right_fea_pad)
self.tile_conv4x[0][0].stride = (4, 1)
tile_fea_r4x = self.tile_conv4x(padded_fea_r4x)
self.tile_conv4x[0][0].stride = (4, 4)
tile_fea_l8x = self.tile_conv8x(fea_l[-4])
padded_fea_r8x = F.pad(fea_r[-4], right_fea_pad)
self.tile_conv8x[0][0].stride = (4, 1)
tile_fea_r8x = self.tile_conv8x(padded_fea_r8x)
self.tile_conv8x[0][0].stride = (4, 4)
tile_fea_l16x = self.tile_conv16x(fea_l[-5])
padded_fea_r16x = F.pad(fea_r[-5], right_fea_pad)
self.tile_conv16x[0][0].stride = (4, 1)
tile_fea_r16x = self.tile_conv16x(padded_fea_r16x)
self.tile_conv16x[0][0].stride = (4, 4)
return [
[tile_fea_l16x, tile_fea_r16x],
[tile_fea_l8x, tile_fea_r8x],
[tile_fea_l4x, tile_fea_r4x],
[tile_fea_l2x, tile_fea_r2x],
[tile_fea_l1x, tile_fea_r1x],
]
def tile_hypothesis_pyramid(self, tile_feature_pyramid):
init_tile_cost16x = self._build_volume_2d16x(tile_feature_pyramid[0][0], tile_feature_pyramid[0][1])
init_tile_cost8x = self._build_volume_2d8x(tile_feature_pyramid[1][0], tile_feature_pyramid[1][1])
init_tile_cost4x = self._build_volume_2d4x(tile_feature_pyramid[2][0], tile_feature_pyramid[2][1])
init_tile_cost2x = self._build_volume_2d2x(tile_feature_pyramid[3][0], tile_feature_pyramid[3][1])
init_tile_cost1x = self._build_volume_2d1x(tile_feature_pyramid[4][0], tile_feature_pyramid[4][1])
min_tile_cost16x, min_tile_disp16x = torch.min(init_tile_cost16x, 1)
min_tile_cost8x, min_tile_disp8x = torch.min(init_tile_cost8x, 1)
min_tile_cost4x, min_tile_disp4x = torch.min(init_tile_cost4x, 1)
min_tile_cost2x, min_tile_disp2x = torch.min(init_tile_cost2x, 1)
min_tile_cost1x, min_tile_disp1x = torch.min(init_tile_cost1x, 1)
min_tile_cost16x = torch.unsqueeze(min_tile_cost16x, 1)
min_tile_cost8x = torch.unsqueeze(min_tile_cost8x, 1)
min_tile_cost4x = torch.unsqueeze(min_tile_cost4x, 1)
min_tile_cost2x = torch.unsqueeze(min_tile_cost2x, 1)
min_tile_cost1x = torch.unsqueeze(min_tile_cost1x, 1)
min_tile_disp16x = min_tile_disp16x.float().unsqueeze(1)
min_tile_disp8x = min_tile_disp8x.float().unsqueeze(1)
min_tile_disp4x = min_tile_disp4x.float().unsqueeze(1)
min_tile_disp2x = min_tile_disp2x.float().unsqueeze(1)
min_tile_disp1x = min_tile_disp1x.float().unsqueeze(1)
tile_dscrpt16x = self.tile_fea_dscrpt16x(torch.cat([min_tile_cost16x, tile_feature_pyramid[0][0]], 1))
tile_dscrpt8x = self.tile_fea_dscrpt8x(torch.cat([min_tile_cost8x, tile_feature_pyramid[1][0]], 1))
tile_dscrpt4x = self.tile_fea_dscrpt4x(torch.cat([min_tile_cost4x, tile_feature_pyramid[2][0]], 1))
tile_dscrpt2x = self.tile_fea_dscrpt2x(torch.cat([min_tile_cost2x, tile_feature_pyramid[3][0]], 1))
tile_dscrpt1x = self.tile_fea_dscrpt1x(torch.cat([min_tile_cost1x, tile_feature_pyramid[4][0]], 1))
tile_dx16x = torch.zeros_like(min_tile_disp16x)
tile_dx8x = torch.zeros_like(min_tile_disp8x)
tile_dx4x = torch.zeros_like(min_tile_disp4x)
tile_dx2x = torch.zeros_like(min_tile_disp2x)
tile_dx1x = torch.zeros_like(min_tile_disp1x)
tile_dy16x = torch.zeros_like(min_tile_disp16x)
tile_dy8x = torch.zeros_like(min_tile_disp8x)
tile_dy4x = torch.zeros_like(min_tile_disp4x)
tile_dy2x = torch.zeros_like(min_tile_disp2x)
tile_dy1x = torch.zeros_like(min_tile_disp1x)
# pdb.set_trace()
tile_hyp16x = torch.cat([min_tile_disp16x, tile_dx16x, tile_dy16x, tile_dscrpt16x], 1)
tile_hyp8x = torch.cat([min_tile_disp8x, tile_dx8x, tile_dy8x, tile_dscrpt8x], 1)
tile_hyp4x = torch.cat([min_tile_disp4x, tile_dx4x, tile_dy4x, tile_dscrpt4x], 1)
tile_hyp2x = torch.cat([min_tile_disp2x, tile_dx2x, tile_dy2x, tile_dscrpt2x], 1)
tile_hyp1x = torch.cat([min_tile_disp1x, tile_dx1x, tile_dy1x, tile_dscrpt1x], 1)
return [
[
init_tile_cost16x,
init_tile_cost8x,
init_tile_cost4x,
init_tile_cost2x,
init_tile_cost1x,
],
[
tile_hyp16x,
tile_hyp8x,
tile_hyp4x,
tile_hyp2x,
tile_hyp1x,
]
]
def forward(self, fea_l_pyramid, fea_r_pyramid):
tile_feature_duo_pyramid = self.tile_features(fea_l_pyramid, fea_r_pyramid)
init_cv_pyramid, init_hypo_pyramid = self.tile_hypothesis_pyramid(tile_feature_duo_pyramid)
return [init_cv_pyramid, init_hypo_pyramid]
| 7,608 | 41.038674 | 110 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/submodules.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class DispUpsampleBySlantedPlane(nn.Module):
def __init__(self, upscale, ts=4):
super(DispUpsampleBySlantedPlane, self).__init__()
self.upscale = upscale
self.center = (upscale - 1) / 2
self.DUC = nn.PixelShuffle(upscale)
self.ts = ts
def forward(self, tile_disp, tile_dx, tile_dy):
tile_disp = tile_disp * (self.upscale / self.ts)
disp0 = [] # for each pixel, upsampled disps are stored in channel dimension
for i in range(self.upscale):
for j in range(self.upscale):
disp0.append(tile_disp + (i - self.center) * tile_dx + (j - self.center) * tile_dy)
disp0 = torch.cat(disp0, 1) # [B, upscale**2, H/upscale, W/upscale]
disp1 = self.DUC(disp0) # [B, 1, H/1, W/1]
return disp1
class SlantDUpsampleBySlantedPlaneT4T4(nn.Module):
"""
Slant map upsampling, input tile size = 4x4, output tile size = 4x4
"""
def __init__(self, upscale):
super(SlantDUpsampleBySlantedPlaneT4T4, self).__init__()
self.upscale = upscale
self.center = 4 * (upscale - 1) / 2
self.DUC = nn.PixelShuffle(upscale)
def forward(self, tile_disp, tile_dx, tile_dy):
tile_disp = tile_disp * self.upscale
disp0 = [] # for each pixel, upsampled disps are stored in channel dimension
for i in range(self.upscale):
for j in range(self.upscale):
disp0.append(tile_disp + (i * 4 - self.center) * tile_dx + (j * 4 - self.center) * tile_dy)
disp0 = torch.cat(disp0, 1) # [B, upscale**2, H/upscale, W/upscale]
disp1 = self.DUC(disp0) # [B, 1, H/1, W/1]
return disp1
class SlantD2xUpsampleBySlantedPlaneT4T2(nn.Module):
"""
Slant map upsampling 2x, input tile size = 4x4, output tile size = 2x2
"""
def __init__(self):
super(SlantD2xUpsampleBySlantedPlaneT4T2, self).__init__()
self.DUC = nn.PixelShuffle(2)
def forward(self, tile_disp, tile_dx, tile_dy):
disp0 = [] # for each pixel, upsampled disps are stored in channel dimension
for i in range(2):
for j in range(2):
disp0.append(tile_disp + (i * 2 - 1) * tile_dx + (j * 2 - 1) * tile_dy)
disp0 = torch.cat(disp0, 1) # [B, upscale**2, H/upscale, W/upscale]
disp1 = self.DUC(disp0) # [B, 1, H/1, W/1]
return disp1
class BuildVolume2d(nn.Module):
def __init__(self, maxdisp):
super(BuildVolume2d, self).__init__()
self.maxdisp = maxdisp
def forward(self, feat_l, feat_r):
padded_feat_r = F.pad(feat_r, [self.maxdisp-1, 0, 0, 0])
cost = torch.zeros((feat_l.size()[0], self.maxdisp, feat_l.size()[2], feat_l.size()[3]), device='cuda')
for i in range(0, self.maxdisp):
if i > 0:
# pdb.set_trace()
cost[:, i, :, :] = torch.norm(feat_l[:, :, :, :] - padded_feat_r[:, :, :, self.maxdisp-1-i:-i:4], 1, 1)
else:
# pdb.set_trace()
cost[:, i, :, :] = torch.norm(feat_l[:, :, :, :] - padded_feat_r[:, :, :, self.maxdisp-1::4], 1, 1)
return cost.contiguous() # B*D*H*W
class BuildVolume2dChaos(nn.Module):
def __init__(self):
super(BuildVolume2dChaos, self).__init__()
def forward(self, refimg_fea, targetimg_fea, disps):
B, C, H, W = refimg_fea.shape
batch_disp = torch.unsqueeze(disps, dim=2).view(-1, 1, H, W)
batch_feat_l = refimg_fea[:, None, :, :, :].repeat(1, disps.shape[1], 1, 1, 1).view(-1, C, H, W)
batch_feat_r = targetimg_fea[:, None, :, :, :].repeat(1, disps.shape[1], 1, 1, 1).view(-1, C, H, W)
warped_batch_feat_r = warp(batch_feat_r, batch_disp)
volume = torch.norm(batch_feat_l - warped_batch_feat_r, 1, 1).view(B, disps.shape[1], H, W)
volume = volume.contiguous()
return volume
def warp(x, disp):
"""
    warp an image/tensor (im2) back to im1, according to the per-pixel disparity
    x: [B, C, H, W] (im2)
    disp: [B, 1, H, W] horizontal disparity; pixel (x, y) samples from (x - disp, y)
"""
B, C, H, W = x.size()
# mesh grid
xx = torch.arange(0, W, device=x.device).view(1, -1).repeat(H, 1)
yy = torch.arange(0, H, device=x.device).view(-1, 1).repeat(1, W)
xx = xx.view(1, 1, H, W).repeat(B, 1, 1, 1)
yy = yy.view(1, 1, H, W).repeat(B, 1, 1, 1)
vgrid = torch.cat((xx, yy), 1).float()
# vgrid = Variable(grid)
vgrid[:,:1,:,:] = vgrid[:,:1,:,:] - disp
# scale grid to [-1,1]
vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :].clone() / max(W - 1, 1) - 1.0
vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :].clone() / max(H - 1, 1) - 1.0
vgrid = vgrid.permute(0, 2, 3, 1)
    output = F.grid_sample(x, vgrid, align_corners=True)  # matches the (W-1)/(H-1) grid normalization above
return output
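# --- Hedged usage sketch (added; not part of the original repository) --------
# A minimal sanity check of `warp`: with an all-zero disparity the sampling
# grid reduces to the identity grid, so the warped tensor should reproduce the
# input. Shapes below are illustrative only.
if __name__ == '__main__':
    feat = torch.rand(2, 8, 16, 32)          # [B, C, H, W] dummy feature map
    zero_disp = torch.zeros(2, 1, 16, 32)    # [B, 1, H, W] disparity in pixels
    out = warp(feat, zero_disp)
    print(out.shape)                         # torch.Size([2, 8, 16, 32])
    print((out - feat).abs().max())          # 0 (or numerically tiny)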
| 4,805 | 38.393443 | 119 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/models/tile_warping.py | import torch
import torch.nn as nn
import torch.nn.functional as F
import pdb
from .submodules import DispUpsampleBySlantedPlane, BuildVolume2dChaos
class TileWarping(nn.Module):
def __init__(self, args):
super(TileWarping, self).__init__()
self.disp_up = DispUpsampleBySlantedPlane(4)
self.build_l1_volume_chaos = BuildVolume2dChaos()
def forward(self, tile_plane: torch.Tensor, fea_l: torch.Tensor, fea_r: torch.Tensor):
"""
local cost volume
:param tile_plane: d, dx, dy
:param fea_l:
:param fea_r:
:return: local cost volume
"""
tile_d = tile_plane[:, 0, :, :].unsqueeze(1)
tile_dx = tile_plane[:, 1, :, :].unsqueeze(1) # h direction
tile_dy = tile_plane[:, 2, :, :].unsqueeze(1) # w direction
local_cv = []
for disp_d in range(-1, 2):
flatten_local_disp_ws_disp_d = self.disp_up(tile_d + disp_d, tile_dx, tile_dy)
cv_ws_disp_d = self.build_l1_volume_chaos(fea_l, fea_r, flatten_local_disp_ws_disp_d)
local_cv_ws_disp_d = [] # local cost volume in one disp hypothesis [B, 16, H/4, W/4]
for i in range(4):
for j in range(4):
local_cv_ws_disp_d.append(cv_ws_disp_d[:, :, i::4, j::4])
local_cv_ws_disp_d = torch.cat(local_cv_ws_disp_d, 1)
local_cv.append(local_cv_ws_disp_d) # local cost volume containing all the disp hypothesis[B, 48, H/4, W/4]
# pdb.set_trace()
local_cv = torch.cat(local_cv, 1)
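        # Added note (not in the original source): with 4x4 tiles and the three
        # disparity offsets {-1, 0, +1} tried above, local_cv has shape
        # [B, 3 * 16, H/4, W/4] = [B, 48, H/4, W/4]; channels are grouped first
        # by disparity offset and then by the 16 pixel positions inside a tile.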
return local_cv
class TileWarping1(nn.Module):
"""
Functionality same as TileWarping but with variable tile size
"""
def __init__(self, tile_size, args):
super(TileWarping1, self).__init__()
self.tile_size = tile_size
self.center = (tile_size - 1) / 2
self.disp_up = DispUpsampleBySlantedPlane(tile_size)
self.build_l1_volume_chaos = BuildVolume2dChaos()
def forward(self, tile_plane: torch.Tensor, fea_l: torch.Tensor, fea_r: torch.Tensor):
"""
local cost volume
:param tile_plane: d, dx, dy
:param fea_l:
:param fea_r:
:return: local cost volume
"""
tile_d = tile_plane[:, 0, :, :].unsqueeze(1)
tile_dx = tile_plane[:, 1, :, :].unsqueeze(1) # h direction
tile_dy = tile_plane[:, 2, :, :].unsqueeze(1) # w direction
local_cv = []
for disp_d in range(-1, 2):
flatten_local_disp_ws_disp_d = self.disp_up(tile_d + disp_d, tile_dx, tile_dy)
cv_ws_disp_d = self.build_l1_volume_chaos(fea_l, fea_r, flatten_local_disp_ws_disp_d)
local_cv_ws_disp_d = [] # local cost volume in one disp hypothesis [B, 16, H/4, W/4]
for i in range(self.tile_size):
for j in range(self.tile_size):
local_cv_ws_disp_d.append(cv_ws_disp_d[:, :, i::self.tile_size, j::self.tile_size])
local_cv_ws_disp_d = torch.cat(local_cv_ws_disp_d, 1)
local_cv.append(local_cv_ws_disp_d) # local cost volume containing all the disp hypothesis[B, 48, H/4, W/4]
# pdb.set_trace()
local_cv = torch.cat(local_cv, 1)
return local_cv
| 3,259 | 42.466667 | 120 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/datasets/kitti_dataset.py | import os
import random
from torch.utils.data import Dataset
from PIL import Image
import numpy as np
from datasets.data_io import get_transform, read_all_lines, pfm_imread
import torchvision.transforms.functional as photometric
import pdb
class KITTIDataset(Dataset):
def __init__(self, datapath, list_filename, training):
self.datapath = datapath
self.left_filenames, self.right_filenames, self.disp_filenames, self.dx_gt_filenames, self.dy_gt_filenames = \
self.load_path(list_filename)
self.training = training
if self.training:
assert self.disp_filenames is not None
def load_path(self, list_filename):
lines = read_all_lines(list_filename)
splits = [line.split() for line in lines]
left_images = [x[0] for x in splits]
right_images = [x[1] for x in splits]
if len(splits[0]) == 2: # ground truth not available
return left_images, right_images, None, None, None
else:
disp_images = [x[2] for x in splits]
dx_gt = [x[3] for x in splits]
dy_gt = [x[4] for x in splits]
return left_images, right_images, disp_images, dx_gt, dy_gt
def load_image(self, filename):
return Image.open(filename).convert('RGB')
def load_disp(self, filename):
data = Image.open(filename)
data = np.array(data, dtype=np.float32) / 256.
return data
def load_dx_dy(self, filename):
data, scale = pfm_imread(filename)
data = np.ascontiguousarray(data, dtype=np.float32)
return data
def __len__(self):
return len(self.left_filenames)
def __getitem__(self, index):
left_img = self.load_image(os.path.join(self.datapath, self.left_filenames[index]))
right_img = self.load_image(os.path.join(self.datapath, self.right_filenames[index]))
if self.disp_filenames: # has disparity ground truth
disparity = self.load_disp(os.path.join(self.datapath, self.disp_filenames[index]))
else:
disparity = None
if self.dx_gt_filenames and self.dy_gt_filenames: # has disparity slant param ground truth
dx_gt = self.load_dx_dy(os.path.join(self.datapath, self.dx_gt_filenames[index]))
dy_gt = self.load_dx_dy(os.path.join(self.datapath, self.dy_gt_filenames[index]))
else:
dx_gt = None
dy_gt = None
if self.training:
w, h = left_img.size
crop_w, crop_h = 1152, 320 # similar to crops of HITNet paper, but multiple of 64
x1 = random.randint(0, w - crop_w)
y1 = random.randint(0, h - crop_h)
# random crop
left_img = left_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
right_img = right_img.crop((x1, y1, x1 + crop_w, y1 + crop_h))
disparity = disparity[y1:y1 + crop_h, x1:x1 + crop_w]
dx_gt = dx_gt[y1:y1 + crop_h, x1:x1 + crop_w]
dy_gt = dy_gt[y1:y1 + crop_h, x1:x1 + crop_w]
# photometric augmentation: brightness and contrast perturb
sym_random_brt = np.random.uniform(0.8, 1.2)
sym_random_cts = np.random.uniform(0.8, 1.2)
asym_random_brt = np.random.uniform(0.95, 1.05, size=2)
asym_random_cts = np.random.uniform(0.95, 1.05, size=2)
# brightness
left_img = photometric.adjust_brightness(left_img, sym_random_brt)
right_img = photometric.adjust_brightness(right_img, sym_random_brt)
left_img = photometric.adjust_brightness(left_img, asym_random_brt[0])
right_img = photometric.adjust_brightness(right_img, asym_random_brt[1])
# contrast
left_img = photometric.adjust_contrast(left_img, sym_random_cts)
right_img = photometric.adjust_contrast(right_img, sym_random_cts)
left_img = photometric.adjust_contrast(left_img, asym_random_cts[0])
right_img = photometric.adjust_contrast(right_img, asym_random_cts[1])
# to tensor, normalize
processed = get_transform()
left_img = processed(left_img)
right_img = processed(right_img)
# random patch exchange of right image
patch_h = random.randint(50, 180)
patch_w = random.randint(50, 250)
patch1_x = random.randint(0, crop_h-patch_h)
patch1_y = random.randint(0, crop_w-patch_w)
patch2_x = random.randint(0, crop_h-patch_h)
patch2_y = random.randint(0, crop_w-patch_w)
# pdb.set_trace()
# print(right_img.shape)
img_patch = right_img[:, patch2_x:patch2_x+patch_h, patch2_y:patch2_y+patch_w]
right_img[:, patch1_x:patch1_x+patch_h, patch1_y:patch1_y+patch_w] = img_patch
return {"left": left_img,
"right": right_img,
"disparity": disparity,
"dx_gt": dx_gt,
"dy_gt": dy_gt}
else:
w, h = left_img.size
# normalize
processed = get_transform()
left_img = processed(left_img).numpy()
right_img = processed(right_img).numpy()
# pad to size 1280x384
top_pad = 384 - h
right_pad = 1280 - w
assert top_pad > 0 and right_pad > 0
# pad images
left_img = np.lib.pad(left_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
right_img = np.lib.pad(right_img, ((0, 0), (top_pad, 0), (0, right_pad)), mode='constant',
constant_values=0)
# pad disparity gt
if disparity is not None:
assert len(disparity.shape) == 2
disparity = np.lib.pad(disparity, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
# pad dx and dy gt
if dx_gt is not None and dy_gt is not None:
assert len(dx_gt.shape) == 2
dx_gt = np.lib.pad(dx_gt, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
assert len(dy_gt.shape) == 2
dy_gt = np.lib.pad(dy_gt, ((top_pad, 0), (0, right_pad)), mode='constant', constant_values=0)
if disparity is not None and dx_gt is not None and dy_gt is not None:
return {"left": left_img,
"right": right_img,
"disparity": disparity,
"top_pad": top_pad,
"right_pad": right_pad,
"dx_gt": dx_gt,
"dy_gt": dy_gt}
else:
return {"left": left_img,
"right": right_img,
"top_pad": top_pad,
"right_pad": right_pad,
"left_filename": self.left_filenames[index],
"right_filename": self.right_filenames[index]}
| 7,074 | 43.21875 | 119 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/datasets/data_io.py | import numpy as np
import re
import torchvision.transforms as transforms
def get_transform():
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
return transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(mean=mean, std=std),
])
# read all lines in a file
def read_all_lines(filename):
with open(filename) as f:
lines = [line.rstrip() for line in f.readlines()]
return lines
# read an .pfm file into numpy array, used to load SceneFlow disparity files
def pfm_imread(filename):
file = open(filename, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().decode('utf-8').rstrip()
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
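# --- Hedged usage sketch (added; not part of the original file) --------------
# pfm_imread expects the textual PFM header ("Pf"/"PF", dimensions, a negative
# scale for little-endian data) followed by raw float32 scanlines stored bottom
# row first. The snippet writes a tiny 2x2 greyscale PFM to a temporary file
# and reads it back; the temporary filename is illustrative only.
if __name__ == '__main__':
    import os
    import tempfile
    arr = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
    with tempfile.NamedTemporaryFile(suffix='.pfm', delete=False) as f:
        f.write(b'Pf\n2 2\n-1.0\n')
        np.flipud(arr).astype('<f4').tofile(f)
        tmp_name = f.name
    data, scale = pfm_imread(tmp_name)
    print(np.allclose(data, arr), scale)   # True 1.0
    os.remove(tmp_name)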
| 1,439 | 23.40678 | 78 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/datasets/__init__.py | from .kitti_dataset import KITTIDataset
__datasets__ = {
"kitti": KITTIDataset
}
| 86 | 13.5 | 39 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/loss/total_loss.py | import torch
import torch.nn.functional as F
from loss.initialization_loss import init_loss
from loss.propagation_loss import prop_loss, slant_loss, w_loss
def global_loss(init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid,
d_gt, dx_gt, dy_gt, maxdisp,
lambda_init=1, lambda_prop=1, lambda_slant=1, lambda_w=1):
"""
    :param init_cv_cost_pyramid: initial matching cost volumes, one per level
    :param prop_disp_pyramid: propagated/refined disparity maps, coarse to fine
    :param dx_pyramid: predicted disparity-slant dx maps
    :param dy_pyramid: predicted disparity-slant dy maps
    :param w_pyramid: predicted confidence maps
    :param d_gt: ground-truth disparity
    :param dx_gt: ground-truth slant dx
    :param dy_gt: ground-truth slant dy
    :param maxdisp: maximum disparity
    :param lambda_init: weight of the initialization loss
    :param lambda_prop: weight of the propagation loss
    :param lambda_slant: weight of the slant loss
    :param lambda_w: weight of the confidence (w) loss
    :return: scalar loss, the mean over all concatenated per-pixel loss terms
"""
# if len(d_gt.shape) == 3:
# d_gt = d_gt.unsqueeze(1)
# if len(dx_gt.shape) == 3:
# dx_gt = dx_gt.unsqueeze(1)
# if len(dy_gt.shape) == 3:
# dy_gt = dy_gt.unsqueeze(1)
d_gt_pyramid = []
for i in range(len(init_cv_cost_pyramid)):
scale = 4 * (2 ** i) # 4,8,16,32,64
d_gt_pyramid.append(torch.nn.MaxPool2d(scale, scale)(d_gt)/(scale/4))
d_gt_pyramid.reverse() # disp ground truth generation. From small to large.
init_loss_pyramid = []
for i, cv in enumerate(init_cv_cost_pyramid):
# pdb.set_trace()
mask = (d_gt_pyramid[i] > 0) & (d_gt_pyramid[i] < maxdisp/(2**(len(init_cv_cost_pyramid)-1-i)))
init_loss_pyramid.append(
lambda_init * init_loss(cv, d_gt_pyramid[i], maxdisp/(2**(len(init_cv_cost_pyramid)-1-i)))[mask]
)
# pdb.set_trace()
init_loss_vec = torch.cat(init_loss_pyramid, dim=0) # 1-dim vector
# pdb.set_trace()
prop_loss_pyramid = [] # masked
prop_diff_pyramid = [] # not masked
mask = (d_gt > 0) & (d_gt < maxdisp)
prop_loss_weights = [1/64, 1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2, 1]
for i, disp in enumerate(prop_disp_pyramid):
prop_diff_pyramid.append(
torch.abs(d_gt - disp)
)
prop_loss_pyramid.append(
lambda_prop * prop_loss_weights[i] * prop_loss(prop_diff_pyramid[-1], 10000)[mask]
)
# pdb.set_trace()
prop_loss_vec = torch.cat(prop_loss_pyramid, dim=0)
# pdb.set_trace()
slant_loss_pyramid = []
slant_loss_weights = [1/64, 1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/4, 1/4, 1/4, 1/2]
for i in range(len(dx_pyramid)):
# print(i)
slant_loss_pyramid.append(
lambda_slant * slant_loss_weights[i] * slant_loss(dx_pyramid[i], dy_pyramid[i], dx_gt, dy_gt, prop_diff_pyramid[i], mask)
)
slant_loss_vec = torch.cat(slant_loss_pyramid, dim=0)
# pdb.set_trace()
w_loss_pyramid = []
w_loss_weights = [1/32, 1/32, 1/16, 1/16, 1/8, 1/8, 1/4, 1/4]
for i, w in enumerate(w_pyramid):
w_loss_pyramid.append(
lambda_w * w_loss_weights[i] * w_loss(w, prop_diff_pyramid[i+1], mask) # index for prop_diff_pyramid plus 1 since there is no confidence at 1st level
)
w_loss_vec = torch.cat(w_loss_pyramid, dim=0)
# pdb.set_trace()
total_loss_vec = torch.cat([init_loss_vec, prop_loss_vec, slant_loss_vec, w_loss_vec], dim=0)
# pdb.set_trace()
return torch.mean(total_loss_vec)
# def total_loss(init_cv_pyramid, prop_disp_pyramid, slant_pyramid, w_pyramid, d_gt_pyramid):
# """
# calculate final loss
#
# :param init_cv_pyramid: output of init module of network. None in the post-prop (16x, 8x, 4x, 2x, 1x)
# :param prop_disp_pyramid: output hypothesis disparity of prop module(64x, 32x, 16x, 8x, 4x, 2x, 1x)
# :param slant_pyramid: dx and dy of slants(64x, 32x, 16x, 8x, 4x, 2x, 1x)
# :param w_pyramid: confidence. none in the first and post-prop(32x, 16x, 8x, 4x)
# :param d_gt_pyramid: disparity groundtruth pyramid (from small to large)(64x, 32x, 16x, 8x, 4x, 2x, 1x)
# :return: scalar, weighted sum of all loss
# """
# for i, hyp in enumerate(d_gt_pyramid):
# if i == 0:
# loss_init = ini
# loss_vec = loss.reshape(-1)
# if i == 0:
# all_loss_vec = loss_vec
# else:
# all_loss_vec = torch.cat([all_loss_vec, loss_vec], 0)
#
# return torch.mean(all_loss_vec)
if __name__ == '__main__':
import pdb
import os
import time
os.environ["CUDA_VISIBLE_DEVICES"] = "9"
maxdisp = 256
img_h = 256
img_w = 512
bs = 4
M = 5 # corresponding to M=4 in paper
init_cv_cost_pyramid = []
for i in range(M):
scale = 2**(M-i-1)
init_cv_cost_pyramid.append(
torch.rand(bs, maxdisp//scale, img_h//(scale*4), img_w//(scale*4)).cuda() * 5.
)
prop_disp_pyramid = []
dx_pyramid = []
dy_pyramid = []
for i in range(M+2):
prop_disp_pyramid.append(
(torch.rand(bs, 1, img_h, img_w).cuda() - 0.5) * 10.
)
dx_pyramid.append(
torch.rand(bs, 1, img_h, img_w).cuda() - 0.5
)
dy_pyramid.append(
torch.rand(bs, 1, img_h, img_w).cuda() - 0.5
)
w_pyramid = []
for i in range(M-1):
w_pyramid.append(
torch.rand(bs, 1, img_h, img_w).cuda()
)
d_gt = (torch.rand(bs, 1, img_h, img_w).cuda() - 0.5) * 10.
dx_gt = torch.rand(bs, 1, img_h, img_w).cuda() - 0.5
dy_gt = torch.rand(bs, 1, img_h, img_w).cuda() - 0.5
for _ in range(1):
st_time = time.time()
loss = global_loss(init_cv_cost_pyramid, prop_disp_pyramid, dx_pyramid, dy_pyramid, w_pyramid,
d_gt, dx_gt, dy_gt, maxdisp)
print('Time: {:.3f}'.format(time.time() - st_time))
pdb.set_trace()
| 5,696 | 33.95092 | 162 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/loss/initialization_loss.py | import torch
import torch.nn.functional as F
import pdb
def init_loss(pred_init_cost: torch.Tensor, d_gt: torch.Tensor, maxdisp, beta=1):
"""
    Initialization loss, HITNet paper eqt (10)
    :param pred_init_cost: initial cost volume [B*D*H*W]
    :param d_gt: ground-truth disparity [B*1*H*W]
    :param maxdisp: maximum disparity covered by the cost volume
    :param beta: margin used for the non-matching cost term
    :return: init loss [B*1*H*W]
"""
cost_gt = subpix_cost(pred_init_cost, d_gt, maxdisp)
cost_nm = torch.gather(pred_init_cost, 1, get_non_match_disp(pred_init_cost, d_gt))
loss = cost_gt + F.relu(beta - cost_nm)
# pdb.set_trace()
return loss
def subpix_cost(cost: torch.Tensor, disp: torch.Tensor, maxdisp: int):
"""
phi, e.g. eqt(9) in HITNet paper
:param cost:
:param disp:
:return:
"""
# pdb.set_trace()
disp[disp >= maxdisp - 1] = maxdisp - 2
disp[disp < 0] = 0
disp_floor = disp.floor()
sub_cost = (disp - disp_floor) * torch.gather(cost, 1, disp_floor.long()+1) + (disp_floor + 1 - disp) * torch.gather(cost, 1, disp_floor.long())
# pdb.set_trace()
return sub_cost
def get_non_match_disp(pred_init_cost: torch.Tensor, d_gt: torch.Tensor):
"""
HITNet paper, eqt (11)
:param pred_init_cost: B, D, H, W
:param d_gt: B, 1, H, W
:return: LongTensor: min_non_match_disp: B, 1, H, W
"""
B, D, H, W = pred_init_cost.size()
disp_cand = torch.arange(0, D, step=1, device=d_gt.device).view(1, -1, 1, 1).repeat(B, 1, H, W).float()
match_disp_lower_bound = d_gt - 1.5
match_disp_upper_bound = d_gt + 1.5
INF = torch.Tensor([float("Inf")]).view(1, 1, 1, 1).repeat(B, D, H, W).to(d_gt.device)
tmp_cost = torch.where((disp_cand < match_disp_lower_bound) | (disp_cand > match_disp_upper_bound), pred_init_cost, INF)
# pdb.set_trace()
__, min_non_match_disp = torch.min(tmp_cost, dim=1, keepdim=True)
# pdb.set_trace()
return min_non_match_disp
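# --- Hedged sanity check (added; not part of the original file) --------------
# get_non_match_disp masks out every disparity candidate within 1.5 of the
# ground truth before taking the arg-min, so the returned "non-matching"
# disparity is always more than 1.5 away from d_gt.
if __name__ == '__main__':
    cost = torch.rand(2, 12, 4, 4)
    d_gt = torch.rand(2, 1, 4, 4) * 8
    d_nm = get_non_match_disp(cost, d_gt)
    print((d_nm.float() - d_gt).abs().min())   # strictly greater than 1.5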
#
# if __name__ == '__main__':
# cost = torch.rand(1, 12, 2, 2)
# d_gt = torch.rand(1, 1, 2, 2)*4
# output_cost = init_loss(cost, d_gt)
# pdb.set_trace()
| 2,006 | 30.857143 | 148 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/loss/propagation_loss.py | import torch
import torch.nn.functional as F
import pdb
import math
def prop_loss(d_diff, A=1, alpha=1, c=0.1):
"""
    Loss from HITNet eqt (12)
:param d_diff: d^gt - d^
:param A: The truncation value
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: L^prop [B*1*H*W]
"""
rho = echo_loss(d_diff, alpha, c)
A = torch.ones_like(rho) * A
loss = torch.where(rho < A, rho, A)
# pdb.set_trace()
return loss
def echo_loss(x, alpha, c):
"""
An amazing loss function presented in paper: A General and Adaptive Robust Loss Function (CVPR 2019).
The name prefix 'echo' is the name of a hero in Overwatch who can become any other hero during her ultimate
:param x: torch.Tensor
:param alpha: shape param
:param c > 0: scale param
:return: torch.Tensor: loss
"""
loss = (abs(alpha - 2) / alpha) * ((((x / c)**2) / abs(alpha - 2) + 1)**(alpha / 2) - 1)
return loss
def slant_loss(dx, dy, dx_gt, dy_gt, d_diff, mask, B=1):
closer_mask = d_diff < B
mask = mask * closer_mask # mask and
slant_diff = torch.cat([dx_gt-dx, dy_gt-dy], dim=1)
loss = torch.norm(slant_diff, p=1, dim=1, keepdim=True)[mask]
# print('slant_loss: {:.3f}'.format(loss.mean()))
# print('dx_gt mean: {:.3f}, dy_gt mean: {:.3f}'.format(dx_gt.mean(), dy_gt.mean()))
return loss # 1-dim vector
def w_loss(conf, diff, mask, C1=1, C2=1.5):
"""
:param conf: aka omega
:param diff: d^gt - d^
:param C1:
:param C2:
:return: torch.Tensor: loss
"""
closer_mask = diff < C1
further_mask = diff > C2
mask = mask * (closer_mask + further_mask) # mask and
closer_item = F.relu(1 - conf)
further_item = F.relu(conf)
# pdb.set_trace()
loss = closer_item * closer_mask.float() + further_item * further_mask.float()
return loss[mask] # 1-dim vector
# if __name__ == '__main__':
# cost = torch.rand(1, 12, 2, 2)
# conf = (torch.rand(1, 1, 2, 2).cuda() - 1) * 4
# d_gt = torch.rand(1, 1, 2, 2) * 4
# d_pred = torch.rand(1, 1, 2, 2) * 4
# d_diff = d_gt.cuda() - d_pred.cuda()
# prop_loss = w_loss(conf, d_diff, 1, 1)
# pdb.set_trace()
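# --- Hedged sanity check (added; not part of the original file) --------------
# prop_loss applies the robust "echo" penalty and truncates it at A, so a very
# large disparity error contributes at most A. With the defaults alpha=1 and
# c=0.1, an error of 0.05 stays below the cap while an error of 100 is clipped
# to exactly A = 1.
if __name__ == '__main__':
    print(prop_loss(torch.tensor([0.05])))    # ~0.12
    print(prop_loss(torch.tensor([100.0])))   # 1.0 (truncated at A=1)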
| 2,213 | 29.328767 | 111 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/write_pfm.py | import numpy as np
import re
import sys
from utils.experiment import tensor2numpy
def write_pfm_tensor(file, image, scale = 1):
image = tensor2numpy(image)
image = np.array(image, dtype='float32')
file = open(file, 'wb')
color = None
if image.dtype.name != 'float32':
raise Exception('Image dtype must be float32.')
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1: # greyscale
color = False
else:
raise Exception('Image must have H x W x 3, H x W x 1 or H x W dimensions.')
file.write(b'PF\n' if color else b'Pf\n')
file.write(b'%d %d\n' % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == '<' or endian == '=' and sys.byteorder == 'little':
scale = -scale
file.write(b'%f\n' % scale)
image = np.flipud(image)
image.tofile(file) | 922 | 24.638889 | 90 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/visualization.py | from __future__ import print_function
import torch
import torch.nn as nn
import torch.utils.data
from torch.autograd import Variable, Function
import torch.nn.functional as F
import math
import numpy as np
def gen_error_colormap():
cols = np.array(
[[0 / 3.0, 0.1875 / 3.0, 49, 54, 149],
[0.1875 / 3.0, 0.375 / 3.0, 69, 117, 180],
[0.375 / 3.0, 0.75 / 3.0, 116, 173, 209],
[0.75 / 3.0, 1.5 / 3.0, 171, 217, 233],
[1.5 / 3.0, 3 / 3.0, 224, 243, 248],
[3 / 3.0, 6 / 3.0, 254, 224, 144],
[6 / 3.0, 12 / 3.0, 253, 174, 97],
[12 / 3.0, 24 / 3.0, 244, 109, 67],
[24 / 3.0, 48 / 3.0, 215, 48, 39],
[48 / 3.0, np.inf, 165, 0, 38]], dtype=np.float32)
cols[:, 2: 5] /= 255.
return cols
error_colormap = gen_error_colormap()
class disp_error_image_func(Function):
def forward(self, D_est_tensor, D_gt_tensor, abs_thres=3., rel_thres=0.05, dilate_radius=1):
D_gt_np = D_gt_tensor.detach().cpu().numpy()
D_est_np = D_est_tensor.detach().cpu().numpy()
B, H, W = D_gt_np.shape
# valid mask
mask = D_gt_np > 0
# error in percentage. When error <= 1, the pixel is valid since <= 3px & 5%
error = np.abs(D_gt_np - D_est_np)
error[np.logical_not(mask)] = 0
error[mask] = np.minimum(error[mask] / abs_thres, (error[mask] / D_gt_np[mask]) / rel_thres)
# get colormap
cols = error_colormap
# create error image
error_image = np.zeros([B, H, W, 3], dtype=np.float32)
for i in range(cols.shape[0]):
error_image[np.logical_and(error >= cols[i][0], error < cols[i][1])] = cols[i, 2:]
# TODO: imdilate
# error_image = cv2.imdilate(D_err, strel('disk', dilate_radius));
error_image[np.logical_not(mask)] = 0.
        # show color tag in the top-left corner of the image
for i in range(cols.shape[0]):
distance = 20
error_image[:, :10, i * distance:(i + 1) * distance, :] = cols[i, 2:]
return torch.from_numpy(np.ascontiguousarray(error_image.transpose([0, 3, 1, 2])))
def backward(self, grad_output):
return None
| 2,198 | 36.271186 | 100 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/experiment.py | from __future__ import print_function, division
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.utils.data
from torch.autograd import Variable
import torchvision.utils as vutils
import torch.nn.functional as F
import numpy as np
import copy
def make_iterative_func(func):
def wrapper(vars):
if isinstance(vars, list):
return [wrapper(x) for x in vars]
elif isinstance(vars, tuple):
return tuple([wrapper(x) for x in vars])
elif isinstance(vars, dict):
return {k: wrapper(v) for k, v in vars.items()}
else:
return func(vars)
return wrapper
def make_nograd_func(func):
def wrapper(*f_args, **f_kwargs):
with torch.no_grad():
ret = func(*f_args, **f_kwargs)
return ret
return wrapper
@make_iterative_func
def tensor2float(vars):
if isinstance(vars, float):
return vars
elif isinstance(vars, torch.Tensor):
return vars.data.item()
else:
raise NotImplementedError("invalid input type for tensor2float")
@make_iterative_func
def tensor2numpy(vars):
if isinstance(vars, np.ndarray):
return vars
elif isinstance(vars, torch.Tensor):
return vars.data.cpu().numpy()
else:
raise NotImplementedError("invalid input type for tensor2numpy")
@make_iterative_func
def check_allfloat(vars):
assert isinstance(vars, float)
def save_scalars(logger, mode_tag, scalar_dict, global_step):
scalar_dict = tensor2float(scalar_dict)
for tag, values in scalar_dict.items():
if not isinstance(values, list) and not isinstance(values, tuple):
values = [values]
for idx, value in enumerate(values):
scalar_name = '{}/{}'.format(mode_tag, tag)
# if len(values) > 1:
scalar_name = scalar_name + "_" + str(idx)
logger.add_scalar(scalar_name, value, global_step)
def save_images(logger, mode_tag, images_dict, global_step):
images_dict = tensor2numpy(images_dict)
# print('images_dict shape: {}'.format(images_dict.shape))
for tag, values in images_dict.items():
if not isinstance(values, list) and not isinstance(values, tuple):
values = [values]
# print('values len: {}'.format(len(values)))
for idx, value in enumerate(values):
# print('value shape: {}'.format(value.shape))
if len(value.shape) == 3:
value = value[:, np.newaxis, :, :]
value = value[:1]
value = torch.from_numpy(value)
image_name = '{}/{}'.format(mode_tag, tag)
if len(values) > 1:
image_name = image_name + "_" + str(idx)
logger.add_image('img_' + image_name, vutils.make_grid(value, padding=0, nrow=1, normalize=True, scale_each=True),
global_step)
def save_hist(logger, mode_tag, in_tensor, global_step):
images_dict = tensor2numpy(in_tensor)
# print('images_dict shape: {}'.format(images_dict.shape))
for tag, values in images_dict.items():
if not isinstance(values, list) and not isinstance(values, tuple):
values = [values]
print('values len: {}'.format(len(values)))
for idx, value in enumerate(values):
print('value shape: {}'.format(value.shape))
if len(value.shape) == 3:
value = value[:, np.newaxis, :, :]
value = value[:1]
value = torch.from_numpy(value)
value = value.view(-1)
image_name = '{}/{}'.format(mode_tag, tag)
if len(values) > 1:
image_name = image_name + "_" + str(idx)
logger.add_histogram(image_name, value, global_step)
def adjust_learning_rate(optimizer, epoch, base_lr, lrepochs):
splits = lrepochs.split(':')
assert len(splits) == 2
# parse the epochs to downscale the learning rate (before :)
downscale_epochs = [int(eid_str) for eid_str in splits[0].split(',')]
# parse downscale rate (after :)
downscale_rate = [float(eid_str) for eid_str in splits[1].split(',')]
print("downscale epochs: {}, downscale rate: {}".format(downscale_epochs, downscale_rate))
lr = base_lr
for eid, downscale_rate in zip(downscale_epochs, downscale_rate):
if epoch >= eid:
lr /= downscale_rate
else:
break
print("setting learning rate to {}".format(lr))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
class AverageMeter(object):
def __init__(self):
self.sum_value = 0.
self.count = 0
def update(self, x):
check_allfloat(x)
self.sum_value += x
self.count += 1
def mean(self):
return self.sum_value / self.count
class AverageMeterDict(object):
def __init__(self):
self.data = None
self.count = 0
def update(self, x):
check_allfloat(x)
self.count += 1
if self.data is None:
self.data = copy.deepcopy(x)
else:
for k1, v1 in x.items():
if isinstance(v1, float):
self.data[k1] += v1
elif isinstance(v1, tuple) or isinstance(v1, list):
for idx, v2 in enumerate(v1):
self.data[k1][idx] += v2
else:
                    raise NotImplementedError("error input type for update AvgMeterDict")
def mean(self):
@make_iterative_func
def get_mean(v):
return v / float(self.count)
return get_mean(self.data)
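# --- Hedged usage sketch (added; not part of the original file) --------------
# adjust_learning_rate parses `lrepochs` as "<epochs>:<divisors>", e.g.
# "10,12:2,2" divides the base lr by 2 once epoch >= 10 and by another 2 once
# epoch >= 12. The dummy optimizer below exists only to exercise the call.
if __name__ == "__main__":
    dummy = torch.nn.Parameter(torch.zeros(1))
    optim = torch.optim.SGD([dummy], lr=1e-3)
    for epoch in (5, 10, 12):
        adjust_learning_rate(optim, epoch, base_lr=1e-3, lrepochs="10,12:2,2")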
| 5,660 | 31.164773 | 126 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/saver.py | import os
import shutil
import torch
from collections import OrderedDict
import glob
import torch.distributed as dist
import json
class Saver(object):
def __init__(self, args, use_dist=False):
self.args = args
self.use_dist = use_dist
# self.directory = os.path.join('run', args.dataset, args.checkname)
self.directory = args.logdir
self.runs = sorted(glob.glob(os.path.join(self.directory, 'experiment_*'))) # Sort saved results folders
self.run_id = max([int(x.split('_')[-1]) for x in self.runs]) + 1 if self.runs else 0 # Create a new folder id
self.experiment_dir = os.path.join(self.directory, 'experiment_{}'.format(str(self.run_id)))
if not os.path.exists(self.experiment_dir):
os.makedirs(self.experiment_dir)
print('Saver currently runs in {}'.format(self.experiment_dir))
def save_experiment_config(self):
"""Write experiment config to file"""
if (self.use_dist and dist.get_rank() == 0) or not self.use_dist:
logfile = os.path.join(self.experiment_dir, 'parameters.txt')
log_file = open(logfile, 'w')
log_file.write('\n')
json.dump(self.args.__dict__, log_file, indent=2)
log_file.write('\n')
log_file.close()
| 1,309 | 37.529412 | 119 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/metrics.py | import torch
import torch.nn.functional as F
from utils.experiment import make_nograd_func
from torch.autograd import Variable
from torch import Tensor
# Update D1 from >3px to >=3px & >5%
# matlab code:
# E = abs(D_gt - D_est);
# n_err = length(find(D_gt > 0 & E > tau(1) & E. / abs(D_gt) > tau(2)));
# n_total = length(find(D_gt > 0));
# d_err = n_err / n_total;
def check_shape_for_metric_computation(*vars):
assert isinstance(vars, tuple)
for var in vars:
assert len(var.size()) == 3
assert var.size() == vars[0].size()
# a wrapper to compute metrics for each image individually
def compute_metric_for_each_image(metric_func):
def wrapper(D_ests, D_gts, masks, *nargs):
check_shape_for_metric_computation(D_ests, D_gts, masks)
bn = D_gts.shape[0] # batch size
results = [] # a list to store results for each image
# compute result one by one
for idx in range(bn):
# if tensor, then pick idx, else pass the same value
cur_nargs = [x[idx] if isinstance(x, (Tensor, Variable)) else x for x in nargs]
if masks[idx].float().mean() / (D_gts[idx] > 0).float().mean() < 0.1:
print("masks[idx].float().mean() too small, skip")
else:
ret = metric_func(D_ests[idx], D_gts[idx], masks[idx], *cur_nargs)
results.append(ret)
if len(results) == 0:
print("masks[idx].float().mean() too small for all images in this batch, return 0")
return torch.tensor(0, dtype=torch.float32, device=D_gts.device)
else:
return torch.stack(results).mean()
return wrapper
@make_nograd_func
@compute_metric_for_each_image
def D1_metric(D_est, D_gt, mask):
D_est, D_gt = D_est[mask], D_gt[mask]
E = torch.abs(D_gt - D_est)
err_mask = (E > 3) & (E / D_gt.abs() > 0.05)
return torch.mean(err_mask.float())
@make_nograd_func
@compute_metric_for_each_image
def Thres_metric(D_est, D_gt, mask, thres):
assert isinstance(thres, (int, float))
D_est, D_gt = D_est[mask], D_gt[mask]
E = torch.abs(D_gt - D_est)
err_mask = E > thres
return torch.mean(err_mask.float())
# NOTE: please do not use this to build up training loss
@make_nograd_func
@compute_metric_for_each_image
def EPE_metric(D_est, D_gt, mask):
D_est, D_gt = D_est[mask], D_gt[mask]
    return F.l1_loss(D_est, D_gt, reduction='mean')
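# --- Hedged sanity check (added; not part of the original file) --------------
# Toy D1 computation on a single 2x2 image: exactly one of the four valid
# pixels is off by 5px, which is both > 3px and > 5% of its ground truth, so
# D1 should come out as 0.25.
if __name__ == "__main__":
    D_gt = torch.tensor([[[10., 20.], [30., 40.]]])    # [B, H, W]
    D_est = torch.tensor([[[10., 25.], [30., 40.]]])
    mask = D_gt > 0
    print(D1_metric(D_est, D_gt, mask))                # tensor(0.2500)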
| 2,428 | 35.80303 | 95 | py |
PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching | PyTorch-HITNet-Hierarchical-Iterative-Tile-Refinement-Network-for-Real-time-Stereo-Matching-main/utils/__init__.py | from utils.experiment import *
from utils.visualization import *
from utils.metrics import D1_metric, Thres_metric, EPE_metric
| 127 | 31 | 61 | py |
dfs-tools | dfs-tools-master/utils/dfs_msubset.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2022 Harm Brouwer <me@hbrouwer.eu>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pandas as pd
import numpy as np
import random as rnd
import scipy.stats as sps
def dfs_msubset(fn_models, num_models, num_iterations, fn_subset,
enforce_pos_inf=True, enforce_neg_inf=True):
"""Reduce model set using subset sampling.
Reduce the dimensionality of an m x n model matrix X using subset
sampling. The following procedure is repeated for "num_iterations" to
arrive at an k x n matrix X' (where k < m) that maximally reflects the
knowledge encoded in the original matrix X:
(1) Take a subset of k rows of matrix X, and call it X';
(2) Check if all columns of matrix X' are informative, and if the
reduced matrix encodes the same positive and/or negative constraints (if
enforced) as the unreduced matrix, otherwise skip to the next iteration;
(3) Compute the similarity between X and X' on the basis of the
proposition-by-proposition inference scores in X and X';
(4) If X' is the best approximation of X so far, store it;
(5) Run next iteration, and rerun from step (1);
(6) If we have reached "num_iterations", return the best X' found.
Args:
fn_models (:obj:`str`):
filename of the input model matrix.
num_models (:obj:`int`):
number of subsetted models.
num_iterations (:obj:`int`):
number of sample iterations.
        fn_subset (:obj:`str`):
filename of the output model matrix.
enforce_pos_inf (:obj:`bool`):
flags whether positive inferences should be enforced.
enforce_neg_inf (:obj:`bool`):
            flags whether negative inferences should be enforced.
"""
ms = pd.read_csv(fn_models, header=None, sep=" ")
ms = ms.to_numpy()
ps = ms[0,:]
ms = ms[1:,:].astype(int)
ms_iv = inference_vector(ms)
sms = np.zeros((num_models, len(ps)))
sms_r = 0
itr = 0
while itr < num_iterations:
nsms = ms[rnd.sample(range(1,len(ms)), num_models),:]
if not(all(np.sum(nsms, axis=0) > 1)):
# print("Bad sample: Zero vectors ... redo iteration")
continue
nsms_iv = inference_vector(nsms)
if (enforce_pos_inf and not(equal_pos_inf(ms_iv, nsms_iv))):
# print("Bad sample: Missing positive inferences ... redo iteration")
continue
if (enforce_neg_inf and not(equal_neg_inf(ms_iv, nsms_iv))):
# print("Bad sample: Missing negative inferences ... redo iteration")
continue
nsms_r, _ = sps.pearsonr(ms_iv, nsms_iv)
print("Iteration: ", itr + 1, ", r=", nsms_r, sep="")
if (nsms_r > sms_r):
sms = nsms
sms_r = nsms_r
itr = itr + 1
print("\nBest r =", sms_r)
df = pd.DataFrame(sms, columns=ps)
df.to_csv(fn_subset, index=False, sep=" ")
print("\nWrote [", fn_subset,"]")
def equal_pos_inf(ms_iv, sms_iv):
"""True iff inference vectors enforce the same positive inferences.
Args:
ms_iv (:obj:`ndarray`):
inference vector of the original model matrix
sms_iv (:obj:`ndarray`):
inference vector of the subsetted model matrix
Returns:
(:obj:`bool`): True if inference vectors enforce the same positive
inferences.
"""
return all((ms_iv < 1) == (sms_iv < 1))
def equal_neg_inf(ms_iv, sms_iv):
"""True iff inference vectors enforce the same negative inferences.
Args:
ms_iv (:obj:`ndarray`):
inference vector of the original model matrix
sms_iv (:obj:`ndarray`):
inference vector of the subsetted model matrix
Returns:
(:obj:`bool`): True if inference vectors enforce the same negative
inferences.
"""
return all((ms_iv > -1) == (sms_iv > -1))
def inference_vector(ms):
"""Compute inference vector for a given model set.
Computes a vector containing the inference score of each proposition,
given each other proposition.
Args:
ms (:obj:`ndarray`): matrix of models
Returns:
(:obj:`ndarray`): vector of inference scores
"""
_, nps = ms.shape
mx = np.zeros((nps, nps))
for i1 in range(nps):
for i2 in range(nps):
mx[i1, i2] = inference(ms[:,i1], ms[:,i2])
return mx.reshape(nps ** 2)
def inference(v1, v2):
"""Compute inference score of v1 from v2.
Args:
v1 (:obj:`ndarray`): vector a
v2 (:obj:`ndarray`): vector b
Returns
(:obj:`float`): inference score of a from b
"""
pr_ab = cond_pr(v1, v2)
pr_a = prior_pr(v1)
if (pr_ab > pr_a):
return (pr_ab - pr_a) / (1.0 - pr_a)
else:
return (pr_ab - pr_a) / pr_a
def prior_pr(v):
"""Compute prior probability of a vector.
Args:
v (:obj:`ndarray`): vector a
Returns
(:obj:`float`): prior probability of a
"""
return np.sum(v) / len(v)
def conj_pr(v1, v2):
"""Compute prior conjunction probability of two vectors.
Args:
v1 (:obj:`ndarray`): vector a
v2 (:obj:`ndarray`): vector b
Returns
(:obj:`float`): conjunction probability of a and b
"""
if all(v1 == v2):
return prior_pr(v1)
else:
return prior_pr(v1 * v2)
def cond_pr(v1, v2):
"""Compute conditional probaility of v1 given v2.
Args:
v1 (:obj:`ndarray`): vector a
v2 (:obj:`ndarray`): vector b
Returns
(:obj:`float`): conditional probability of a given b
"""
return conj_pr(v1, v2) / prior_pr(v2)
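# --- Hedged usage sketch (added; not part of the original script) ------------
# `inference` scores how strongly proposition a is inferred from proposition b
# over a set of models: positive when observing b raises the probability of a,
# negative when it lowers it. A tiny hand-made example over four models:
if __name__ == "__main__":
    a = np.array([1, 1, 0, 0])
    b = np.array([1, 1, 1, 0])
    print(inference(a, b))   # 0.33...: b raises the probability of a from 0.5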
| 6,259 | 29.241546 | 82 | py |
boteye | boteye-master/bin/pre_navigation.py | '''
/******************************************************************************
* Copyright 2017-2018 Baidu Robotic Vision Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/
Sample run:
python pre_navigation.py
--record_path=
--build_folder=
'''
import os
import argparse
import glob
import subprocess
import shutil
def main():
bow_proto = os.environ['HOME'] + '/XP_release/3rdparty_lib_lean/BOW.proto'
parser = argparse.ArgumentParser()
parser.add_argument('--record_path', type=str, default=None,
help='Path of the recorded data')
parser.add_argument('--build_folder', type=str, default=None, help=
'Path to custom built binaries to override the default bin locations.')
parser.add_argument('--waypoints_config', type=str, default=None, help=
'Path of way points config file.')
parser.add_argument('--priori_traj_file', type=str, default=None, help=
'Path of priori waypoints csv file')
parser.add_argument('--add_config_waypoints', type=bool, default=False,
help='Add waypoints from config file.')
parser.add_argument('--customize_trajectory', type=bool, default=False,
help='Enable waypoints customization.')
parser.add_argument('--use_priori_traj', type=bool, default=False,
help='Use priori traj csv instead of map_pb to init frames.')
parser.add_argument('--connect_adjacent_id', type=bool, default=True,
help='Generate connectivity based on waypoints adjacency.')
args = parser.parse_args()
# In releases, binaries are in $MASTER_DIR/bin
bin_folder = os.environ['MASTER_DIR'] + '/bin/'
binaries = [
bin_folder + '/dump_map_info',
bin_folder + '/waypoints_generate_map',
bin_folder + '/waypoint_graph']
if not os.path.isdir(bin_folder):
# In built binaries are assumed to be in
bin_folder = os.environ['MASTER_DIR'] + '/build/'
binaries = [
bin_folder + '/pc_apps/utils/dump_map_info',
bin_folder + '/pc_apps/navigation/waypoints_generate_map',
bin_folder + '/pc_apps/navigation/waypoint_graph']
if args.build_folder:
build_folder = args.build_folder
binaries = [
build_folder + '/pc_apps/utils/dump_map_info',
build_folder + '/pc_apps/navigation/waypoints_generate_map',
build_folder + '/pc_apps/navigation/waypoint_graph']
for binary in binaries:
if not os.path.isfile(binary):
raise ValueError(binary + ' does not exist!')
record_path = args.record_path
navigation_path = record_path + '/navigation/'
if not os.path.isdir(navigation_path):
os.mkdir(navigation_path)
shutil.copyfile(record_path + '/live.pb', navigation_path + '/navi.pb')
# mkdir -p ${NAVIGATION_PATH}
# ${BUILD_DIRECTORY}/pc_apps/utils/dump_map_info \
# -map_pb ${RECORD_PATH}/tracking.pb > ${NAVIGATION_PATH}/waypoints.csv
customize_trajectory = ''
waypoints_config = ''
add_config_waypoints = ''
priori_traj_file = ''
use_priori_traj = ''
if args.waypoints_config:
waypoints_config = '-waypoints_config ' + args.waypoints_config
if args.add_config_waypoints:
add_config_waypoints = '-add_config_waypoints'
if args.priori_traj_file:
        priori_traj_file = '-priori_traj_file ' + args.priori_traj_file
if args.use_priori_traj:
use_priori_traj = '-use_priori_traj'
if args.customize_trajectory:
customize_trajectory = '-customize_trajectory'
command = ' '.join([
binaries[0],
waypoints_config,
add_config_waypoints,
priori_traj_file,
use_priori_traj,
customize_trajectory,
'-map_pb ' + record_path + '/live.pb',
'>', navigation_path + '/waypoints.csv'])
print(command)
subprocess.call(command, shell = True)
# # Waypoints_generate_map: csv -> map
# ${BUILD_DIRECTORY}/pc_apps/navigation/waypoints_generate_map \
# -waypoints_file ${NAVIGATION_PATH}/waypoints.csv \
# -output_folder=${NAVIGATION_PATH}
command = ' '.join([
binaries[1],
'-waypoints_file ' + navigation_path + '/waypoints.csv',
'-output_folder ' + navigation_path])
print(command)
subprocess.call(command, shell = True)
# # Generate waypoints connectivity graph
# ${BUILD_DIRECTORY}/pc_apps/navigation/waypoint_graph \
# -waypoints_file ${NAVIGATION_PATH}/waypoints.csv \
# -waypoints_only_mode \
# -map_occupancy_file ${NAVIGATION_PATH}/map_occupancy.png \
# -map_occupancy_specs_file ${NAVIGATION_PATH}/map_occupancy.yml \
# -waypoint_graph_output ${NAVIGATION_PATH}/waypoints_graph.txt
connect_adjacent_id = '-connect_adjacent_id'
if not args.connect_adjacent_id:
connect_adjacent_id = ''
command = ' '.join([
binaries[2],
'-waypoints_file ' + navigation_path + '/waypoints.csv',
'-waypoints_only_mode',
connect_adjacent_id,
'-map_occupancy_file ' + navigation_path + '/map_occupancy.png',
'-map_occupancy_specs_file ' + navigation_path + '/map_occupancy.yml',
'-waypoint_graph_output ' + navigation_path + '/waypoints_graph.txt'])
print(command)
subprocess.call(command, shell = True)
if __name__ == "__main__":
main()
| 5,999 | 39.268456 | 85 | py |
CEAR | CEAR-main/data_process.py | import os
import json
import random
import argparse
from tqdm import tqdm
from transformers import BertTokenizer
class FewRelProcessor():
def __init__(self, args, tokenizer):
self.args = args
self.tokenizer = tokenizer
self.task_num = args.task_num
self.relation_num = 80
self.relnum_per_task = int(self.relation_num/self.task_num)
args.relation_num = self.relation_num
args.relnum_per_task = self.relnum_per_task
self.train_num = 420
self.val_num = 140
self.test_num = 140
self.task_order = None
self.read_from_order = False
self.relations = []
self.rel2id = {}
def _init_rel2id_relations(self):
with open(os.path.join(self.args.data_dir, 'FewRel', 'pid2name.json'), 'r', encoding='utf-8') as file:
pid2name = json.loads(file.read())
rel2id, relations = {}, []
        for key in pid2name:
rel2id[key] = len(relations)
relations.append(key)
return rel2id, relations
def set_task_order(self, filename, index):
with open(os.path.join(self.args.data_dir, 'FewRel', filename), 'r', encoding='utf-8') as file:
self.task_order = json.loads(file.read())
self.task_order = self.task_order[index]
for i in range(10):
self.relations += self.task_order["T"+str(i+1)]
for rel in self.relations:
self.rel2id[rel] = self.relations.index(rel)
def set_read_from_order(self, index):
self.read_from_order = True
self.order_index = index
def _read_data_from_order(self, filename):
ret, length = [], []
with open(os.path.join(self.args.data_dir, 'FewRel', f'Exp{self.order_index}', filename), 'r', encoding='utf-8') as file:
data = json.loads(file.read())
for key, value in data.items():
for instance in value:
                # Strip right-hand zero padding when present; otherwise keep the full sequence.
                tail = instance["tokens"].index(0) if 0 in instance["tokens"] else len(instance["tokens"])
                instance["input_ids"] = instance["tokens"][:tail]
instance["h_index"] = instance["input_ids"].index(30522)
instance["t_index"] = instance["input_ids"].index(30524)
instance["relation"] = key
instance["label"] = self.rel2id[key]
ret.append(instance)
length.append(len(value))
return ret, length
def get(self):
if self.read_from_order:
taskdatas = []
for i in range(self.task_num):
relation = self.relations[i*self.relnum_per_task: (i+1)*self.relnum_per_task]
train, train_len = self._read_data_from_order(f"task_{i}_train.json")
val, _ = self._read_data_from_order(f"task_{i}_val.json")
test, _ = self._read_data_from_order(f"task_{i}_test.json")
taskdatas.append({
'relation': relation,
'train': train,
'val': val,
'test': test,
'train_len': train_len
})
else:
data = self._read()
taskdatas = self._divide(data)
return taskdatas
def get_rel2id(self):
return self.rel2id
def _read(self):
"""
Read FewRel train and val data.
"""
print("Reading dataset...")
with open(os.path.join(self.args.data_dir, 'FewRel', 'train_wiki.json'), 'r', encoding='utf-8') as file:
train_set = json.loads(file.read())
with open(os.path.join(self.args.data_dir, 'FewRel', 'val_wiki.json'), 'r', encoding='utf-8') as file:
val_set = json.loads(file.read())
dataset = train_set
dataset.update(val_set)
print("Read finished!")
return dataset
def _divide(self, dataset):
"""
Divide dataset into <self.task_num> tasks.
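        Each relation contributes its first `train_num` samples to train, the next
        `val_num` to val, and the remainder to test.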
"""
if self.task_order is None:
relations = list(dataset.keys())
random.shuffle(relations)
for rel in relations:
self.relations.append(rel)
self.rel2id[rel] = self.relations.index(rel)
relations = self.relations
print(f'Dividing dataset into {self.task_num} tasks...')
taskdatas = []
for i in tqdm(range(self.task_num)):
train, val, test = [], [], []
train_len = []
relation = relations[i*self.relnum_per_task: (i+1)*self.relnum_per_task]
for r in relation:
rdata = dataset[r]
random.shuffle(rdata)
for instance in rdata:
# todo add entity marker and index
instance['relation'] = r
instance['label'] = self.relations.index(r)
sentence = ' '.join(instance['tokens']).lower()
h = ' '.join(instance['tokens'][instance['h'][-1][0][0]: instance['h'][-1][0][-1]+1]).lower()
t = ' '.join(instance['tokens'][instance['t'][-1][0][0]: instance['t'][-1][0][-1]+1]).lower()
sentence = sentence.replace(h, f"[E11] {h} [E12]")
sentence = sentence.replace(t, f"[E21] {t} [E22]")
instance['input_ids'] = self.tokenizer.encode(sentence)
instance['h_index'] = instance['input_ids'].index(self.tokenizer.additional_special_tokens_ids[0])
instance['t_index'] = instance['input_ids'].index(self.tokenizer.additional_special_tokens_ids[2])
train.extend(rdata[0: self.train_num])
val.extend(rdata[self.train_num: self.train_num+self.val_num])
test.extend(rdata[self.train_num+self.val_num:])
train_len.append(self.train_num)
task = {
'relation': relation,
'train': train, # {"tokens", "h", "t", "relation", "label", "input_ids", "h_index", "t_index"}
'val': val,
'test': test,
'train_len': train_len
}
taskdatas.append(task)
return taskdatas
def _tokenize(self, taskdatas):
for taskdata in taskdatas:
for name in ["train", "val", "test"]:
for sample in taskdata[name]:
input_ids = self.tokenizer.encode(' '.join(sample['tokens']))
sample['input_ids'] = input_ids
return taskdatas
class tacredProcessor():
def __init__(self, args, tokenizer):
self.args = args
self.tokenizer = tokenizer
self.task_num = args.task_num
self.relation_num = 40
self.relnum_per_task = int(self.relation_num/self.task_num)
args.relation_num = self.relation_num
args.relnum_per_task = self.relnum_per_task
self.train_num = 320
self.val_num = 0
self.test_num = 40
self.task_order = None
self.read_from_order = False
self.relations = []
self.rel2id = {}
def set_task_order(self, filename, index):
with open(os.path.join(self.args.data_dir, 'tacred', filename), 'r', encoding='utf-8') as file:
self.task_order = json.loads(file.read())
self.task_order = self.task_order[index]
for i in range(10):
self.relations += self.task_order["T"+str(i+1)]
for rel in self.relations:
self.rel2id[rel] = self.relations.index(rel)
print(f"Experiment Num {index}")
assert len(self.relations) == len(self.rel2id)
def set_read_from_order(self, index):
self.read_from_order = True
self.order_index = index
def _read_data_from_order(self, filename):
ret, length = [], []
with open(os.path.join(self.args.data_dir, 'tacred', f'Exp{self.order_index}', filename), 'r', encoding='utf-8') as file:
data = json.loads(file.read())
for key, value in data.items():
for instance in value:
tail = instance["tokens"].index(0) if 0 in instance["tokens"] else 256
instance["input_ids"] = instance["tokens"][:tail]
instance["h_index"] = instance["input_ids"].index(30522)
instance["t_index"] = instance["input_ids"].index(30524)
instance["relation"] = key
instance["label"] = self.rel2id[key]
ret.append(instance)
length.append(len(value))
return ret, length
def get(self):
if self.read_from_order:
taskdatas = []
for i in range(self.task_num):
relation = self.relations[i*self.relnum_per_task: (i+1)*self.relnum_per_task]
train, train_len = self._read_data_from_order(f"task_{i}_train.json")
val, _ = self._read_data_from_order(f"task_{i}_val.json")
test, _ = self._read_data_from_order(f"task_{i}_test.json")
taskdatas.append({
'relation': relation,
'train': train,
'val': val,
'test': test,
'train_len': train_len
})
else:
data = self._read()
taskdatas = self._divide(data)
return taskdatas
def get_rel2id(self):
return self.rel2id
    def get_relation_description(self):
descriptions = [self.tokenizer.encode(' '.join(relation[relation.index(":")+1:].split('_'))) for relation in self.relations]
return descriptions
def _read(self):
"""
Read tacred dataset.
"""
print("Reading dataset...")
with open(os.path.join(self.args.data_dir, 'tacred', 'train.json'), 'r', encoding='utf-8') as file:
train_set = json.loads(file.read())
with open(os.path.join(self.args.data_dir, 'tacred', 'dev.json'), 'r', encoding='utf-8') as file:
val_set = json.loads(file.read())
with open(os.path.join(self.args.data_dir, 'tacred', 'test.json'), 'r', encoding='utf-8') as file:
test_set = json.loads(file.read())
dataset = train_set
dataset += val_set
dataset += test_set
if self.args.set_task_order:
dataset = self._convert_to_fewrel_form(dataset)
else:
with open(os.path.join(self.args.data_dir, 'tacred', 'relations.json'), 'r', encoding='utf-8') as file:
self.relations = json.loads(file.read())
dataset = self._convert_to_fewrel_form(dataset)
self.relations = []
print("Read finished!")
return dataset
def _convert_to_fewrel_form(self, dataset):
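        """Convert TACRED samples into the FewRel-style dict {relation: [sample, ...]},
        where each sample keeps its "tokens" and gets "h"/"t" entries of the form
        [entity surface form, dummy wikidata id, [[token indices]]]."""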
new_dataset = {}
for sample in dataset:
relation = sample["relation"]
if relation not in self.relations:
continue
h = ' '.join(sample["token"][sample['subj_start']:sample['subj_end']+1])
t = ' '.join(sample["token"][sample['obj_start']:sample['obj_end']+1])
new_sample = {
"tokens": sample["token"],
"h": [h, "Q114514", [[i for i in range(sample['subj_start'], sample['subj_end']+1)]]],
"t": [t, "Q114514", [[i for i in range(sample['obj_start'], sample['obj_end']+1)]]]
}
if relation not in new_dataset:
new_dataset[relation] = [new_sample]
else:
new_dataset[relation].append(new_sample)
return new_dataset
def _divide(self, dataset):
"""
Divide dataset into <self.task_num> tasks.
"""
if self.task_order is None:
relations = list(dataset.keys())
random.shuffle(relations)
for rel in relations:
self.relations.append(rel)
self.rel2id[rel] = self.relations.index(rel)
relations = self.relations
print(f'Dividing dataset into {self.task_num} tasks...')
taskdatas = []
for i in tqdm(range(self.task_num)):
train, val, test = [], [], []
train_len = []
relation = relations[i*self.relnum_per_task: (i+1)*self.relnum_per_task]
for r in relation:
rdata = dataset[r]
random.shuffle(rdata)
for instance in rdata:
# todo add entity marker and index
instance['relation'] = r
instance['label'] = self.relations.index(r)
h_start = instance['h'][-1][0][0]
instance['tokens'].insert(h_start, "[E11]")
h_end = instance['h'][-1][0][-1]+2
instance['tokens'].insert(h_end, "[E12]")
t_start = instance['t'][-1][0][0]
t_end = instance['t'][-1][0][-1]+2
if h_start < t_start:
t_start += 2
t_end += 2
instance['tokens'].insert(t_start, "[E21]")
instance['tokens'].insert(t_end, "[E22]")
sentence = ' '.join(instance['tokens']).lower()
sentence = sentence.replace("[e11]", "[E11]")
sentence = sentence.replace("[e12]", "[E12]")
sentence = sentence.replace("[e21]", "[E21]")
sentence = sentence.replace("[e22]", "[E22]")
instance['input_ids'] = self.tokenizer.encode(sentence)
instance['h_index'] = instance['input_ids'].index(self.tokenizer.additional_special_tokens_ids[0])
instance['t_index'] = instance['input_ids'].index(self.tokenizer.additional_special_tokens_ids[2])
test_count = 0
train_count = 0
for i in range(len(rdata)):
                if i < len(rdata) // 5 and test_count < self.test_num:
test.append(rdata[i])
test_count += 1
else:
train.append(rdata[i])
train_count += 1
if train_count >= self.train_num:
break
train_len.append(train_count)
task = {
'relation': relation,
'train': train, # {"tokens", "h", "t", "relation", "label", "input_ids", "h_index", "t_index"}
'val': val,
'test': test,
'train_len': train_len
}
taskdatas.append(task)
return taskdatas
| 14,791 | 40.203343 | 132 | py |
CEAR | CEAR-main/main.py | # encoding:utf-8
import os
import nni
import math
import time
import json
import torch
import argparse
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import BertTokenizer, BertModel
from nni.utils import merge_parameter
from model import BertEncoder, Classifier
from data_process import FewRelProcessor, tacredProcessor
from utils import collate_fn, save_checkpoint, get_prototypes, memory_select, set_random_seed, compute_cos_sim, get_augmentative_data
default_print = "\033[0m"
blue_print = "\033[1;34;40m"
yellow_print = "\033[1;33;40m"
green_print = "\033[1;32;40m"
def do_train(args, tokenizer, processor, i_exp):
memory = []
memory_len = []
relations = []
testset = []
prev_encoder, prev_classifier = None, None
taskdatas = processor.get()
rel2id = processor.get_rel2id() # {"rel": id}
task_acc, memory_acc = [], []
prototypes = None
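    # Per-task loop implemented below:
    #   1) train encoder + classifier on the new task,
    #   2) select K-means exemplars per relation as memory,
    #   3) from the second task on, replay the memory with contrastive and (focal)
    #      knowledge-distillation losses against the previous model,
    #   4) refresh relation prototypes as a mix of task and replay statistics,
    #   5) evaluate on the union of all test sets seen so far.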
for i in range(args.task_num):
task = taskdatas[i]
traindata, _, testdata = task['train'], task['val'], task['test']
train_len = task['train_len']
testset += testdata
new_relations = task['relation']
relations += new_relations
args.seen_rel_num = len(relations)
# print some info
print(f"{yellow_print}Training task {i}, relation set {task['relation']}.{default_print}")
# train and val on task data
current_encoder = BertEncoder(args, tokenizer, encode_style=args.encode_style)
current_classifier = Classifier(args, args.hidden_dim, args.seen_rel_num, prev_classifier).to(args.device)
if prev_encoder is not None:
current_encoder.load_state_dict(prev_encoder.state_dict())
if args.dataset_name == "FewRel":
current_encoder = train_val_task(args, current_encoder, current_classifier, traindata, testdata, rel2id, train_len)
else:
aug_traindata = get_augmentative_data(args, traindata, train_len)
current_encoder = train_val_task(args, current_encoder, current_classifier, aug_traindata, testdata, rel2id, train_len)
# memory select
print(f'{blue_print}Selecting memory for task {i}...{default_print}')
new_memory, new_memory_len = memory_select(args, current_encoder, traindata, train_len)
memory += new_memory
memory_len += new_memory_len
# evaluate on task testdata
current_prototypes, current_proto_features = get_prototypes(args, current_encoder, traindata, train_len)
acc = evaluate(args, current_encoder, current_classifier, testdata, rel2id)
print(f'{blue_print}Accuracy of task {i} is {acc}.{default_print}')
task_acc.append(acc)
# train and val on memory data
if prev_encoder is not None:
print(f'{blue_print}Training on memory...{default_print}')
task_prototypes = torch.cat([task_prototypes, current_prototypes], dim=0)
task_proto_features = torch.cat([task_proto_features, current_proto_features], dim=0)
prototypes = torch.cat([prototypes, current_prototypes], dim=0)
proto_features = torch.cat([proto_features, current_proto_features], dim=0)
current_model = (current_encoder, current_classifier)
prev_model = (prev_encoder, prev_classifier)
aug_memory = get_augmentative_data(args, memory, memory_len)
current_encoder = train_val_memory(args, current_model, prev_model, memory, aug_memory, testset, rel2id, memory_len, memory_len, prototypes, proto_features, task_prototypes, task_proto_features)
else:
print(f"{blue_print}Initial task, won't train on memory.{default_print}")
# update prototype
print(f'{blue_print}Updating prototypes...{default_print}')
if prev_encoder is not None:
prototypes_replay, proto_features_replay = get_prototypes(args, current_encoder, memory, memory_len)
prototypes, proto_features = (1-args.beta)*task_prototypes + args.beta*prototypes_replay, (1-args.beta)*task_proto_features + args.beta*proto_features_replay
prototypes = F.layer_norm(prototypes, [args.hidden_dim])
proto_features = F.normalize(proto_features, p=2, dim=1)
else:
task_prototypes, task_proto_features = current_prototypes, current_proto_features
prototypes, proto_features = current_prototypes, current_proto_features
# test
print(f'{blue_print}Evaluating...{default_print}')
if prev_encoder is not None:
acc = evaluate(args, current_encoder, current_classifier, testset, rel2id, proto_features)
else:
acc = evaluate(args, current_encoder, current_classifier, testset, rel2id)
print(f'{green_print}Evaluate finished, final accuracy over task 0-{i} is {acc}.{default_print}')
memory_acc.append(acc)
# save checkpoint
print(f'{blue_print}Saving checkpoint of task {i}...{default_print}')
save_checkpoint(args, current_encoder, i_exp, i, "encoder")
save_checkpoint(args, current_classifier, i_exp, i, "classifier")
prev_encoder = current_encoder
prev_classifier = current_classifier
nni.report_intermediate_result(acc)
return task_acc, memory_acc
def train_val_task(args, encoder, classifier, traindata, valdata, rel2id, train_len):
dataloader = DataLoader(traindata, batch_size=args.train_batch_size, shuffle=True, collate_fn=args.collate_fn, drop_last=True)
optimizer = AdamW([
{'params': encoder.parameters(), 'lr': args.encoder_lr},
{'params': classifier.parameters(), 'lr': args.classifier_lr}
], eps=args.adam_epsilon)
# todo add different learning rate for each layer
best_acc = 0.0
for epoch in range(args.epoch_num_task):
encoder.train()
classifier.train()
for step, batch in enumerate(tqdm(dataloader)):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
hidden, _ = encoder(**inputs)
inputs = {
'hidden': hidden,
'labels': batch[4].to(args.device)
}
loss, _ = classifier(**inputs)
loss.backward()
optimizer.step()
optimizer.zero_grad()
acc = evaluate(args, encoder, classifier, valdata, rel2id)
best_acc = max(acc, best_acc)
print(f'Evaluate on epoch {epoch}, accuracy={acc}, best_accuracy={best_acc}')
return encoder
def train_val_memory(args, model, prev_model, traindata, aug_traindata, testdata, rel2id, memory_len, aug_memory_len, prototypes, proto_features, task_prototypes, task_proto_features):
enc, cls = model
prev_enc, prev_cls = prev_model
dataloader = DataLoader(aug_traindata, batch_size=args.train_batch_size, shuffle=True, collate_fn=args.collate_fn, drop_last=True)
optimizer = AdamW([
{'params': enc.parameters(), 'lr': args.encoder_lr},
{'params': cls.parameters(), 'lr': args.classifier_lr}
], eps=args.adam_epsilon)
prev_enc.eval()
prev_cls.eval()
best_acc = 0.0
for epoch in range(args.epoch_num_memory):
enc.train()
cls.train()
for step, batch in enumerate(tqdm(dataloader)):
enc_inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
hidden, feature = enc(**enc_inputs)
with torch.no_grad():
prev_hidden, prev_feature = prev_enc(**enc_inputs)
labels = batch[4].to(args.device)
cont_loss = contrastive_loss(args, feature, labels, prototypes, proto_features, prev_feature)
cont_loss.backward(retain_graph=True)
rep_loss = replay_loss(args, cls, prev_cls, hidden, feature, prev_hidden, prev_feature, labels, prototypes, proto_features)
rep_loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
acc = evaluate(args, enc, cls, testdata, rel2id, proto_features)
best_acc = max(best_acc, acc)
print(f'Evaluate testset on epoch {epoch}, accuracy={acc}, best_accuracy={best_acc}')
nni.report_intermediate_result(acc)
prototypes_replay, proto_features_replay = get_prototypes(args, enc, traindata, memory_len)
prototypes, proto_features = (1-args.beta)*task_prototypes + args.beta*prototypes_replay, (1-args.beta)*task_proto_features + args.beta*proto_features_replay
prototypes = F.layer_norm(prototypes, [args.hidden_dim])
proto_features = F.normalize(proto_features, p=2, dim=1)
return enc
def contrastive_loss(args, feature, labels, prototypes, proto_features=None, prev_feature=None):
# supervised contrastive learning loss
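    # Prototype-based contrastive term. Because `mask` below is one-hot on the true
    # label, this reduces to a cross-entropy over prototype similarities:
    #   L_i = -log( exp(z_i . p_{y_i} / tau) / sum_k exp(z_i . p_k / tau) ),
    # with z_i the normalized feature, p_k the class-prototype features, tau = args.cl_temp.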
dot_div_temp = torch.mm(feature, proto_features.T) / args.cl_temp # [batch_size, rel_num]
dot_div_temp_norm = dot_div_temp - 1.0 / args.cl_temp
exp_dot_temp = torch.exp(dot_div_temp_norm) + 1e-8 # avoid log(0)
mask = torch.zeros_like(exp_dot_temp).to(args.device)
mask.scatter_(1, labels.unsqueeze(1), 1.0)
cardinalities = torch.sum(mask, dim=1)
log_prob = -torch.log(exp_dot_temp / torch.sum(exp_dot_temp, dim=1, keepdim=True))
scloss_per_sample = torch.sum(log_prob*mask, dim=1) / cardinalities
scloss = torch.mean(scloss_per_sample)
# focal knowledge distillation loss
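    # The target is the previous encoder's softmax over the old-task prototypes,
    # additionally modulated by the current features' own similarities (`prev_sim`);
    # each sample is weighted by (1 - p_true)^gamma so that well-classified samples
    # contribute less to the distillation term.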
if prev_feature is not None:
with torch.no_grad():
            prev_proto_features = proto_features[:proto_features.shape[0]-args.relnum_per_task]  # prototypes of previously seen relations
prev_sim = F.softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
prob = F.softmax(torch.mm(feature, proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
focal_weight = focal_weight ** args.gamma
target = F.softmax(torch.mm(prev_feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]
source = F.log_softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]
target = target * prev_sim + 1e-8
fkdloss = torch.sum(-source * target, dim=1)
fkdloss = torch.mean(fkdloss * focal_weight)
else:
fkdloss = 0.0
# margin loss
if proto_features is not None:
with torch.no_grad():
sim = torch.mm(feature, proto_features.T)
neg_sim = torch.scatter(sim, 1, labels.unsqueeze(1), -10.0)
neg_indices = torch.argmax(neg_sim, dim=1)
pos_proto = proto_features[labels]
neg_proto = proto_features[neg_indices]
positive = torch.sum(feature * pos_proto, dim=1)
negative = torch.sum(feature * neg_proto, dim=1)
marginloss = torch.maximum(args.margin - positive + negative, torch.zeros_like(positive).to(args.device))
marginloss = torch.mean(marginloss)
else:
marginloss = 0.0
loss = scloss + args.cl_lambda*marginloss + args.kd_lambda2*fkdloss
return loss
def replay_loss(args, cls, prev_cls, hidden, feature, prev_hidden, prev_feature, labels, prototypes=None, proto_features=None):
# cross entropy
celoss, logits = cls(hidden, labels)
with torch.no_grad():
prev_logits, = prev_cls(prev_hidden)
if prototypes is None:
index = prev_logits.shape[1]
source = F.log_softmax(logits[:, :index], dim=1)
target = F.softmax(prev_logits, dim=1) + 1e-8
kdloss = F.kl_div(source, target)
else:
# focal knowledge distillation
with torch.no_grad():
sim = compute_cos_sim(hidden, prototypes)
prev_sim = sim[:, :prev_logits.shape[1]] # [batch_size, prev_rel_num]
prev_sim = F.softmax(prev_sim / args.kd_temp, dim=1)
prob = F.softmax(logits, dim=1)
focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
focal_weight = focal_weight ** args.gamma
source = logits.narrow(1, 0, prev_logits.shape[1])
source = F.log_softmax(source, dim=1)
target = F.softmax(prev_logits, dim=1)
target = target * prev_sim + 1e-8
kdloss = torch.sum(-source * target, dim=1)
kdloss = torch.mean(kdloss * focal_weight)
rep_loss = celoss + args.kd_lambda1*kdloss
return rep_loss
def evaluate(args, model, classifier, valdata, rel2id, proto_features=None):
model.eval()
dataloader = DataLoader(valdata, batch_size=args.test_batch_size, collate_fn=collate_fn, drop_last=False)
pred_labels, golden_labels = [], []
for i, batch in enumerate(tqdm(dataloader)):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
with torch.no_grad():
hidden, feature = model(**inputs)
logits = classifier(hidden)[0]
prob_cls = F.softmax(logits, dim=1)
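            # Double-headed inference: interpolate the linear classifier's softmax with a
            # prototype-similarity (nearest-class-mean style) softmax using weight alpha.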
if proto_features is not None:
logits = torch.mm(feature, proto_features.T) / args.cl_temp
prob_ncm = F.softmax(logits, dim=1)
final_prob = args.alpha*prob_cls + (1-args.alpha)*prob_ncm
else:
final_prob = prob_cls
# get pred_labels
pred_labels += torch.argmax(final_prob, dim=1).cpu().tolist()
golden_labels += batch[4].tolist()
pred_labels = torch.tensor(pred_labels, dtype=torch.long)
golden_labels = torch.tensor(golden_labels, dtype=torch.long)
acc = float(torch.sum(pred_labels==golden_labels).item()) / float(len(golden_labels))
return acc
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", default="data", type=str)
parser.add_argument("--checkpoint_dir", default="checkpoint", type=str)
parser.add_argument("--dataset_name", default="tacred", type=str)
parser.add_argument("--cuda", default=True, type=bool)
parser.add_argument("--cuda_device", default=1, type=int)
parser.add_argument("--plm_name", default="bert-base-uncased", type=str)
parser.add_argument("--train_batch_size", default=16, type=int)
parser.add_argument("--test_batch_size", default=64, type=int)
parser.add_argument("--epoch_num_task", default=10, type=int, help="Max training epochs.")
parser.add_argument("--epoch_num_memory", default=10, type=int, help="Max training epochs.")
    parser.add_argument("--hidden_dim", default=768, type=int, help="Output dimension of encoder.")
parser.add_argument("--feature_dim", default=64, type=int, help="Output dimension of projection head.")
parser.add_argument("--encoder_lr", default=1e-5, type=float, help="The initial learning rate of encoder for AdamW.")
parser.add_argument("--classifier_lr", default=1e-3, type=float, help="The initial learning rate of classifier for AdamW.")
parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--alpha", default=0.6, type=float, help="Bagging Hyperparameter.")
parser.add_argument("--beta", default=0.2, type=float, help="Prototype weight.")
parser.add_argument("--cl_temp", default=0.1, type=float, help="Temperature for contrastive learning.")
parser.add_argument("--cl_lambda", default=0.8, type=float, help="Hyperparameter for contrastive learning.")
parser.add_argument("--margin", default=0.15, type=float, help="Hyperparameter for margin loss.")
parser.add_argument("--kd_temp", default=0.5, type=float, help="Temperature for knowledge distillation.")
parser.add_argument("--kd_lambda1", default=0.7, type=float, help="Hyperparameter for knowledge distillation.")
parser.add_argument("--kd_lambda2", default=0.5, type=float, help="Hyperparameter for knowledge distillation.")
parser.add_argument("--gamma", default=2.0, type=float, help="Hyperparameter of focal loss.")
parser.add_argument("--encode_style", default="emarker", type=str, help="Encode style of encoder.")
parser.add_argument("--experiment_num", default=5, type=int)
parser.add_argument("--seed", default=2022, type=int)
parser.add_argument("--set_task_order", default=True, type=bool)
parser.add_argument("--read_from_task_order", default=True, type=bool)
parser.add_argument("--task_num", default=10, type=int)
parser.add_argument("--memory_size", default=10, type=int, help="Memory size for each relation.")
parser.add_argument("--early_stop_patient", default=10, type=int)
args = parser.parse_args()
if args.cuda:
device = "cuda:"+str(args.cuda_device)
else:
device = "cpu"
args.device = device
args.collate_fn = collate_fn
tuner_params = nni.get_next_parameter()
args = merge_parameter(args, tuner_params)
tokenizer = BertTokenizer.from_pretrained(args.plm_name, additional_special_tokens=["[E11]", "[E12]", "[E21]", "[E22]"])
s = time.time()
task_results, memory_results = [], []
for i in range(args.experiment_num):
set_random_seed(args)
if args.dataset_name == "FewRel":
processor = FewRelProcessor(args, tokenizer)
else:
processor = tacredProcessor(args, tokenizer)
if args.set_task_order:
processor.set_task_order("task_order.json", i)
if args.read_from_task_order:
processor.set_read_from_order(i)
task_acc, memory_acc = do_train(args, tokenizer, processor, i)
print(f'{green_print}Result of experiment {i}:')
print(f'task acc: {task_acc}')
print(f'memory acc: {memory_acc}')
print(f'Average: {sum(memory_acc)/len(memory_acc)}{default_print}')
task_results.append(task_acc)
memory_results.append(memory_acc)
# torch.cuda.empty_cache()
e = time.time()
task_results = torch.tensor(task_results, dtype=torch.float32)
memory_results = torch.tensor(memory_results, dtype=torch.float32)
print(f'All task result: {task_results.tolist()}')
print(f'All memory result: {memory_results.tolist()}')
task_results = torch.mean(task_results, dim=0).tolist()
memory_results = torch.mean(memory_results, dim=0)
final_average = torch.mean(memory_results).item()
print(f'Final task result: {task_results}')
print(f'Final memory result: {memory_results.tolist()}')
print(f'Final average: {final_average}')
print(f'Time cost: {e-s}s.')
nni.report_final_result(final_average) | 19,179 | 44.129412 | 206 | py |
CEAR | CEAR-main/utils.py | import os
import copy
import torch
import random
import numpy as np
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from sklearn.cluster import KMeans
def set_random_seed(args):
seed = args.seed
torch.manual_seed(seed)
if torch.cuda.is_available() and args.cuda:
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
def collate_fn(batch):
max_len = max([len(sample['input_ids']) for sample in batch])
input_ids = [sample['input_ids'] + [0]*(max_len-len(sample['input_ids'])) for sample in batch]
attention_mask = [[1.0]*len(sample['input_ids']) + [0.0]*(max_len-len(sample['input_ids'])) for sample in batch]
h_index = [sample['h_index'] for sample in batch]
t_index = [sample['t_index'] for sample in batch]
labels = [sample['label'] for sample in batch]
relations = [sample['relation'] for sample in batch]
input_ids = torch.tensor(input_ids, dtype=torch.long)
attention_mask = torch.tensor(attention_mask, dtype=torch.float)
h_index = torch.tensor(h_index, dtype=torch.long)
t_index = torch.tensor(t_index, dtype=torch.long)
labels = torch.tensor(labels, dtype=torch.long)
output = (input_ids, attention_mask, h_index, t_index, labels, relations)
return output
def compute_cos_sim(tensor_a, tensor_b):
"""
tensor_a [k, m]
tensor_b [n, m]
"""
norm_a = torch.norm(tensor_a, dim=1).unsqueeze(1) # [k, 1]
norm_b = torch.norm(tensor_b, dim=1).unsqueeze(0) # [1, n]
cos_sim = torch.mm(tensor_a, tensor_b.T) / torch.mm(norm_a, norm_b) # [k, n]
return cos_sim
def save_checkpoint(args, model, i_exp, i_task, name):
if model is None:
raise Exception(f'The best model of task {i_task} is None.')
    save_dir = os.path.join(args.checkpoint_dir, args.dataset_name, f"Exp{i_exp}")
    os.makedirs(save_dir, exist_ok=True)  # make sure the checkpoint directory exists
    torch.save(model.state_dict(), os.path.join(save_dir, f"{i_task}_{name}.pkl"))
def get_prototypes(args, model, data, reldata_len):
model.eval()
dataloader = DataLoader(data, batch_size=args.test_batch_size, collate_fn=collate_fn, drop_last=False)
hiddens, features = [], []
for i, batch in enumerate(dataloader):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
with torch.no_grad():
hidden, _ = model(**inputs)
hiddens.append(hidden)
with torch.no_grad():
hiddens = torch.cat(hiddens, dim=0)
hidden_tensors = []
current_idx = 0
for i in range(len(reldata_len)):
rel_len = reldata_len[i]
rel_hiddens = torch.narrow(hiddens, 0, current_idx, rel_len)
hidden_proto = torch.mean(rel_hiddens, dim=0)
hidden_tensors.append(hidden_proto)
current_idx += rel_len
hidden_tensors = torch.stack(hidden_tensors, dim=0)
hidden_tensors = torch.nn.LayerNorm([args.hidden_dim]).to(args.device)(hidden_tensors)
feature_tensors = model.get_low_dim_feature(hidden_tensors)
return hidden_tensors, feature_tensors
def memory_select(args, model, data, data_len):
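    """Select up to `args.memory_size` exemplars per relation: K-means is run on the
    encoder's hidden representations of that relation's samples, and for each cluster
    the sample closest to the cluster centre is kept."""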
model.eval()
dataloader = DataLoader(data, batch_size=args.test_batch_size, collate_fn=collate_fn, drop_last=False, shuffle=False)
hiddens, memory, memory_len = [], [], []
for i, batch in enumerate(tqdm(dataloader)):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
with torch.no_grad():
hidden, _ = model(**inputs)
hiddens.append(hidden.cpu())
hiddens = np.concatenate(hiddens, axis=0)
current_len = 0
for i in range(args.relnum_per_task):
rel_len = data_len[i]
kmdata = hiddens[current_len: current_len+rel_len]
k = min(args.memory_size, rel_len)
kmeans = KMeans(n_clusters=k, random_state=0)
distances = kmeans.fit_transform(kmdata)
rel_data = data[current_len: current_len+rel_len]
for j in range(k):
select_idx = np.argmin(distances[:, j]) # [k]
memory.append(rel_data[select_idx])
current_len += rel_len
memory_len.append(k)
return memory, memory_len
def get_augmentative_data(args, data, data_len):
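    """Augment (memory) data in two steps: (1) per relation, add copies of each sample in
    which the head/tail entity mentions are replaced by those of another sample of the
    same relation; (2) via `rel_data_augment`, concatenate each sample with a randomly
    chosen partner (the partner's entity markers removed), keeping the first sample's label.
    Ids 101/102 are BERT's [CLS]/[SEP]; 30522-30525 are the ids of the added special
    tokens [E11], [E12], [E21], [E22] on top of bert-base-uncased's vocabulary."""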
index = 0
data_double = copy.deepcopy(data)
for i in range(len(data_len)):
rel_data = data[index: index+data_len[i]]
index += data_len[i]
rel_data_temp = copy.deepcopy(rel_data)
random.shuffle(rel_data_temp)
for j in range(data_len[i]):
sample1, sample2 = rel_data[j], rel_data_temp[j]
input_ids1 = sample1['input_ids'][1:-1]
input_ids2 = sample2['input_ids'][1:-1]
h_tokens = input_ids2[input_ids2.index(30522)+1:input_ids2.index(30523)]
t_tokens = input_ids2[input_ids2.index(30524)+1:input_ids2.index(30525)]
input_ids1[input_ids1.index(30522)+1: input_ids1.index(30523)] = h_tokens
input_ids1[input_ids1.index(30524)+1: input_ids1.index(30525)] = t_tokens
input_ids = [101] + input_ids1 + [102]
h_index = input_ids.index(30522)
t_index = input_ids.index(30524)
data_double.append({
"input_ids": input_ids,
'h_index': h_index,
't_index': t_index,
'label': sample1['label'],
'relation': sample1['relation']
})
aug_data = []
add_data1 = copy.deepcopy(data_double)
random.shuffle(add_data1)
aug_data1 = rel_data_augment(args, data_double, add_data1)
aug_data += data_double
aug_data += aug_data1
return aug_data
def rel_data_augment(args, rel_data1, rel_data2):
aug_data = []
length = min(len(rel_data1), len(rel_data2))
for i in range(length):
sample1, sample2 = rel_data1[i], rel_data2[i]
input_ids1 = sample1['input_ids'][1:-1]
input_ids2 = sample2['input_ids'][1:-1]
input_ids2.remove(30522)
input_ids2.remove(30523)
input_ids2.remove(30524)
input_ids2.remove(30525)
if args.dataset_name == "FewRel":
length = 512-2-len(input_ids1)
input_ids2 = input_ids2[:length]
if i % 2 == 0:
input_ids = [101] + input_ids1 + input_ids2 + [102]
h_index = sample1['h_index']
t_index = sample1['t_index']
else:
input_ids = [101] + input_ids2 + input_ids1 + [102]
h_index = sample1['h_index'] + len(input_ids2)
t_index = sample1['t_index'] + len(input_ids2)
aug_data.append({
"input_ids": input_ids,
'h_index': h_index,
't_index': t_index,
'label': sample1['label'],
'relation': sample1['relation']
})
return aug_data
def get_aca_data(args, data, data_len, current_relations):
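    """Build augmented-class data on top of the original samples: (1) "hybrid" classes made
    by joining the head-entity context of a sample from one relation with the tail-entity
    context of a sample from another relation, and (2) "reversed" classes made by swapping
    the head/tail markers of a sample (skipped for a small list of roughly symmetric
    relations). Each augmented class receives a new label starting at `args.seen_rel_num`."""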
index = 0
rel_datas = []
for i in range(len(data_len)):
rel_data = data[index: index+data_len[i]]
rel_datas.append(rel_data)
index += data_len[i]
rel_id = args.seen_rel_num
aca_data = copy.deepcopy(data)
idx = args.relnum_per_task // 2
for i in range(args.relnum_per_task // 2):
j = i + idx
datas1 = rel_datas[i]
datas2 = rel_datas[j]
L = 5
for data1, data2 in zip(datas1, datas2):
input_ids1 = data1['input_ids'][1:-1]
e11 = input_ids1.index(30522); e12 = input_ids1.index(30523)
e21 = input_ids1.index(30524); e22 = input_ids1.index(30525)
if e21 <= e11:
continue
input_ids1_sub = input_ids1[max(0, e11-L): min(e12+L+1, e21)]
            input_ids2 = data2['input_ids'][1:-1]  # use the unpadded ids, consistent with data1 above
e11 = input_ids2.index(30522); e12 = input_ids2.index(30523)
e21 = input_ids2.index(30524); e22 = input_ids2.index(30525)
if e21 <= e11:
continue
token2_sub = input_ids2[max(e12+1, e21-L): min(e22+L+1, len(input_ids2))]
input_ids = [101] + input_ids1_sub + token2_sub + [102]
aca_data.append({
'input_ids': input_ids,
'h_index': input_ids.index(30522),
't_index': input_ids.index(30524),
'label': rel_id,
'relation': data1['relation'] + '-' + data2['relation']
})
for index in [30522, 30523, 30524, 30525]:
assert index in input_ids and input_ids.count(index) == 1
rel_id += 1
for i in range(len(current_relations)):
        if current_relations[i] in ['P26', 'P3373', 'per:siblings', 'org:alternate_names', 'per:spouse', 'per:alternate_names', 'per:other_family']:
continue
for data in rel_datas[i]:
input_ids = data['input_ids']
e11 = input_ids.index(30522); e12 = input_ids.index(30523)
e21 = input_ids.index(30524); e22 = input_ids.index(30525)
input_ids[e11] = 30524; input_ids[e12] = 30525
input_ids[e21] = 30522; input_ids[e22] = 30523
aca_data.append({
'input_ids': input_ids,
'h_index': input_ids.index(30522),
't_index': input_ids.index(30524),
'label': rel_id,
                'relation': data['relation'] + '-reverse'
})
for index in [30522, 30523, 30524, 30525]:
assert index in input_ids and input_ids.count(index) == 1
rel_id += 1
return aca_data
| 9,913 | 35.718519 | 147 | py |
CEAR | CEAR-main/model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from utils import compute_cos_sim
from torch.nn import CrossEntropyLoss
from transformers import BertModel
class BertEncoder(nn.Module):
def __init__(self, args, tokenizer, encode_style="emarker"):
super().__init__()
self.args = args
self.tokenizer = tokenizer
self.model = BertModel.from_pretrained('bert-base-uncased').to(args.device)
self.model.resize_token_embeddings(len(tokenizer))
# 'cls' using the cls_token as the embedding
# 'emarker' concatenating the embedding of head and tail entity markers
if encode_style in ["cls", "emarker"]:
self.encode_style = encode_style
else:
raise Exception("Encode_style must be 'cls' or 'emarker'.")
        hidden_size = self.model.config.hidden_size
        if encode_style == "emarker":
            self.linear_transform = nn.Sequential(
                nn.Linear(hidden_size*2, hidden_size, bias=True),
                nn.GELU(),
                nn.LayerNorm([hidden_size])
            ).to(self.args.device)
        # The projection head is used by `forward` for both encode styles,
        # so define it for "cls" as well as "emarker".
        self.head = nn.Sequential(
            nn.Linear(hidden_size, hidden_size),
            nn.GELU(),
            nn.Linear(hidden_size, self.args.feature_dim)
        ).to(self.args.device)
def forward(self, input_ids, attention_mask, h_index, t_index, labels = None):
plm_output = self.model(input_ids, attention_mask=attention_mask)['last_hidden_state']
if self.encode_style == "cls":
            hidden = plm_output[:, 0, :]  # [CLS] token, [batch_size, hidden_size] (keeps the batch dim even when batch_size == 1)
else:
h = torch.stack([plm_output[i, h_index[i], :] for i in range(len(h_index))], dim=0) # [batch_size, hidden_size]
t = torch.stack([plm_output[i, t_index[i], :] for i in range(len(t_index))], dim=0) # [batch_size, hidden_size]
ht_embeddings = torch.cat([h, t], dim=1) # [batch_size, hidden_size*2]
hidden = self.linear_transform(ht_embeddings) # [batch_size, hidden_size]
feature = self.head(hidden) # [batch_size, feature_dim]
feature = F.normalize(feature, p=2, dim=1) # [batch_size, feature_dim]
output = (hidden, feature)
if labels is not None:
# compute scloss of current task
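            # In-batch supervised contrastive loss: the positives for sample i are the
            # other batch members that share its label. This differs from the
            # prototype-based contrastive loss used during memory replay in main.py.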
dot_div_temp = torch.mm(feature, feature.T) / self.args.cl_temp # [batch_size, batch_size]
dot_div_temp_norm = dot_div_temp - torch.max(dot_div_temp, dim=1, keepdim=True)[0].detach() # [batch_size, batch_size]
exp_dot_temp = torch.exp(dot_div_temp_norm) + 1e-8 # avoid log(0) [batch_size, batch_size]
mask = (labels.unsqueeze(1).repeat(1, labels.shape[0]) == labels).to(self.args.device) # [batch_size, batch_size]
cardinalities = torch.sum(mask, dim=1) # [batch_size]
log_prob = -torch.log(exp_dot_temp / torch.sum(exp_dot_temp, dim=1, keepdim=True)) # [batch_size, batch_size]
scloss_per_sample = torch.sum(log_prob*mask, dim=1) / cardinalities # [batch_size]
scloss = torch.mean(scloss_per_sample)
loss = scloss
output = (loss, ) + output
return output
def get_low_dim_feature(self, hidden):
feature = self.head(hidden)
feature = F.normalize(feature, p=2, dim=1)
return feature
class Classifier(nn.Module):
def __init__(self, args, hidden_dim, label_num, prev_classifier=None):
super().__init__()
self.args = args
self.label_num = label_num
self.classifier = nn.Linear(hidden_dim, label_num, bias=False)
self.loss_fn = CrossEntropyLoss()
def forward(self, hidden, labels=None):
logits = self.classifier(hidden)
output = (logits, )
if labels is not None:
loss = self.loss_fn(logits, labels)
output = (loss, ) + output
return output
def incremental_learning(self, seen_rel_num):
        weight = self.classifier.weight.data
        # Rebuild the classifier with one output per seen relation, keeping the weights
        # of the real relations and dropping any augmented-class outputs.
        self.classifier = nn.Linear(weight.shape[1], seen_rel_num, bias=False).to(self.args.device)
with torch.no_grad():
self.classifier.weight.data[:seen_rel_num] = weight[:seen_rel_num]
class FocalLoss(nn.Module):
def __init__(self, alpha=0.25, gamma=2.0, reduction="mean"):
super().__init__()
self.alpha = alpha
self.gamma = gamma
self.reduction = reduction
def forward(self, logits, targets):
ce_loss = F.cross_entropy(logits, targets, reduction='none')
pt = torch.exp(-ce_loss)
focal_loss = self.alpha * (1 - pt)**self.gamma * ce_loss
if self.reduction == "mean":
return focal_loss.mean()
elif self.reduction == "sum":
return focal_loss.sum()
return focal_loss
| 4,900 | 40.533898 | 130 | py |
CEAR | CEAR-main/aca.py | # encoding:utf-8
import os
import nni
import math
import time
import json
import torch
import argparse
import torch.nn.functional as F
from tqdm import tqdm
from torch.utils.data import DataLoader
from torch.optim import AdamW
from transformers import BertTokenizer, BertModel
from nni.utils import merge_parameter
from model import BertEncoder, Classifier
from data_process import FewRelProcessor, tacredProcessor
from utils import collate_fn, save_checkpoint, get_prototypes, memory_select, set_random_seed, compute_cos_sim, get_aca_data, get_augmentative_data
default_print = "\033[0m"
blue_print = "\033[1;34;40m"
yellow_print = "\033[1;33;40m"
green_print = "\033[1;32;40m"
def do_train(args, tokenizer, processor):
memory = []
memory_len = []
relations = []
testset = []
prev_encoder, prev_classifier = None, None
taskdatas = processor.get()
rel2id = processor.get_rel2id() # {"rel": id}
task_acc, memory_acc = [], []
prototypes = None
for i in range(args.task_num):
task = taskdatas[i]
traindata, _, testdata = task['train'], task['val'], task['test']
train_len = task['train_len']
testset += testdata
new_relations = task['relation']
relations += new_relations
args.seen_rel_num = len(relations)
# print some info
print(f"{yellow_print}Training task {i}, relation set {task['relation']}.{default_print}")
# train and val on task data
current_encoder = BertEncoder(args, tokenizer, encode_style=args.encode_style)
current_classifier = Classifier(args, args.hidden_dim, 3*args.seen_rel_num, prev_classifier).to(args.device)
if prev_encoder is not None:
current_encoder.load_state_dict(prev_encoder.state_dict())
aug_traindata = get_aca_data(args, traindata, train_len, task['relation'])
current_encoder = train_val_task(args, current_encoder, current_classifier, aug_traindata, testdata, rel2id, train_len)
current_classifier.incremental_learning(args.seen_rel_num)
# memory select
print(f'{blue_print}Selecting memory for task {i}...{default_print}')
new_memory, new_memory_len = memory_select(args, current_encoder, traindata, train_len)
memory += new_memory
memory_len += new_memory_len
# evaluate on task testdata
current_prototypes, current_proto_features = get_prototypes(args, current_encoder, traindata, train_len)
acc = evaluate(args, current_encoder, current_classifier, testdata, rel2id)
print(f'{blue_print}Accuracy of task {i} is {acc}.{default_print}')
task_acc.append(acc)
# train and val on memory data
if prev_encoder is not None:
print(f'{blue_print}Training on memory...{default_print}')
task_prototypes = torch.cat([task_prototypes, current_prototypes], dim=0)
task_proto_features = torch.cat([task_proto_features, current_proto_features], dim=0)
prototypes = torch.cat([prototypes, current_prototypes], dim=0)
proto_features = torch.cat([proto_features, current_proto_features], dim=0)
current_model = (current_encoder, current_classifier)
prev_model = (prev_encoder, prev_classifier)
aug_memory = get_augmentative_data(args, memory, memory_len)
current_encoder = train_val_memory(args, current_model, prev_model, memory, aug_memory, testset, rel2id, memory_len, memory_len, prototypes, proto_features, task_prototypes, task_proto_features)
else:
print(f"{blue_print}Initial task, won't train on memory.{default_print}")
# update prototype
print(f'{blue_print}Updating prototypes...{default_print}')
if prev_encoder is not None:
prototypes_replay, proto_features_replay = get_prototypes(args, current_encoder, memory, memory_len)
prototypes, proto_features = (1-args.beta)*task_prototypes + args.beta*prototypes_replay, (1-args.beta)*task_proto_features + args.beta*proto_features_replay
prototypes = F.layer_norm(prototypes, [args.hidden_dim])
proto_features = F.normalize(proto_features, p=2, dim=1)
else:
task_prototypes, task_proto_features = current_prototypes, current_proto_features
prototypes, proto_features = current_prototypes, current_proto_features
# test
print(f'{blue_print}Evaluating...{default_print}')
if prev_encoder is not None:
acc = evaluate(args, current_encoder, current_classifier, testset, rel2id, proto_features)
else:
acc = evaluate(args, current_encoder, current_classifier, testset, rel2id)
print(f'{green_print}Evaluate finished, final accuracy over task 0-{i} is {acc}.{default_print}')
memory_acc.append(acc)
# save checkpoint
print(f'{blue_print}Saving checkpoint of task {i}...{default_print}')
prev_encoder = current_encoder
prev_classifier = current_classifier
nni.report_intermediate_result(acc)
return task_acc, memory_acc
def train_val_task(args, encoder, classifier, traindata, valdata, rel2id, train_len):
dataloader = DataLoader(traindata, batch_size=args.train_batch_size, shuffle=True, collate_fn=args.collate_fn, drop_last=True)
optimizer = AdamW([
{'params': encoder.parameters(), 'lr': args.encoder_lr},
{'params': classifier.parameters(), 'lr': args.classifier_lr}
], eps=args.adam_epsilon)
best_acc = 0.0
for epoch in range(args.epoch_num_task):
encoder.train()
classifier.train()
for step, batch in enumerate(tqdm(dataloader)):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
hidden, _ = encoder(**inputs)
inputs = {
'hidden': hidden,
'labels': batch[4].to(args.device)
}
loss, _ = classifier(**inputs)
loss.backward()
optimizer.step()
optimizer.zero_grad()
acc = evaluate(args, encoder, classifier, valdata, rel2id)
best_acc = max(acc, best_acc)
print(f'Evaluate on epoch {epoch}, accuracy={acc}, best_accuracy={best_acc}')
return encoder
def train_val_memory(args, model, prev_model, traindata, aug_traindata, testdata, rel2id, memory_len, aug_memory_len, prototypes, proto_features, task_prototypes, task_proto_features):
enc, cls = model
prev_enc, prev_cls = prev_model
dataloader = DataLoader(aug_traindata, batch_size=args.train_batch_size, shuffle=True, collate_fn=args.collate_fn, drop_last=True)
optimizer = AdamW([
{'params': enc.parameters(), 'lr': args.encoder_lr},
{'params': cls.parameters(), 'lr': args.classifier_lr}
], eps=args.adam_epsilon)
prev_enc.eval()
prev_cls.eval()
best_acc = 0.0
for epoch in range(args.epoch_num_memory):
enc.train()
cls.train()
for step, batch in enumerate(tqdm(dataloader)):
enc_inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
hidden, feature = enc(**enc_inputs)
with torch.no_grad():
prev_hidden, prev_feature = prev_enc(**enc_inputs)
labels = batch[4].to(args.device)
cont_loss = contrastive_loss(args, feature, labels, prototypes, proto_features, prev_feature)
cont_loss.backward(retain_graph=True)
rep_loss = replay_loss(args, cls, prev_cls, hidden, feature, prev_hidden, prev_feature, labels, prototypes, proto_features)
rep_loss.backward()
optimizer.step()
optimizer.zero_grad()
if (epoch+1) % 10 == 0:
acc = evaluate(args, enc, cls, testdata, rel2id, proto_features)
best_acc = max(best_acc, acc)
print(f'Evaluate testset on epoch {epoch}, accuracy={acc}, best_accuracy={best_acc}')
nni.report_intermediate_result(acc)
prototypes_replay, proto_features_replay = get_prototypes(args, enc, traindata, memory_len)
prototypes, proto_features = (1-args.beta)*task_prototypes + args.beta*prototypes_replay, (1-args.beta)*task_proto_features + args.beta*proto_features_replay
prototypes = F.layer_norm(prototypes, [args.hidden_dim])
proto_features = F.normalize(proto_features, p=2, dim=1)
return enc
def contrastive_loss(args, feature, labels, prototypes, proto_features=None, prev_feature=None):
# supervised contrastive learning loss
dot_div_temp = torch.mm(feature, proto_features.T) / args.cl_temp # [batch_size, rel_num]
dot_div_temp_norm = dot_div_temp - 1.0 / args.cl_temp
exp_dot_temp = torch.exp(dot_div_temp_norm) + 1e-8 # avoid log(0)
mask = torch.zeros_like(exp_dot_temp).to(args.device)
mask.scatter_(1, labels.unsqueeze(1), 1.0)
cardinalities = torch.sum(mask, dim=1)
log_prob = -torch.log(exp_dot_temp / torch.sum(exp_dot_temp, dim=1, keepdim=True))
scloss_per_sample = torch.sum(log_prob*mask, dim=1) / cardinalities
scloss = torch.mean(scloss_per_sample)
# focal knowledge distillation loss
if prev_feature is not None:
with torch.no_grad():
            prev_proto_features = proto_features[:proto_features.shape[0]-args.relnum_per_task]  # prototypes of previously seen relations
prev_sim = F.softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
prob = F.softmax(torch.mm(feature, proto_features.T) / args.cl_temp / args.kd_temp, dim=1)
focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
focal_weight = focal_weight ** args.gamma
target = F.softmax(torch.mm(prev_feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]
source = F.log_softmax(torch.mm(feature, prev_proto_features.T) / args.cl_temp, dim=1) # [batch_size, prev_rel_num]
target = target * prev_sim + 1e-8
fkdloss = torch.sum(-source * target, dim=1)
fkdloss = torch.mean(fkdloss * focal_weight)
else:
fkdloss = 0.0
# margin loss
if proto_features is not None:
with torch.no_grad():
sim = torch.mm(feature, proto_features.T)
neg_sim = torch.scatter(sim, 1, labels.unsqueeze(1), -10.0)
neg_indices = torch.argmax(neg_sim, dim=1)
pos_proto = proto_features[labels]
neg_proto = proto_features[neg_indices]
positive = torch.sum(feature * pos_proto, dim=1)
negative = torch.sum(feature * neg_proto, dim=1)
marginloss = torch.maximum(args.margin - positive + negative, torch.zeros_like(positive).to(args.device))
marginloss = torch.mean(marginloss)
else:
marginloss = 0.0
loss = scloss + args.cl_lambda*marginloss + args.kd_lambda2*fkdloss
return loss
def replay_loss(args, cls, prev_cls, hidden, feature, prev_hidden, prev_feature, labels, prototypes=None, proto_features=None):
# cross entropy
celoss, logits = cls(hidden, labels)
with torch.no_grad():
prev_logits, = prev_cls(prev_hidden)
if prototypes is None:
index = prev_logits.shape[1]
source = F.log_softmax(logits[:, :index], dim=1)
target = F.softmax(prev_logits, dim=1) + 1e-8
kdloss = F.kl_div(source, target)
else:
# focal knowledge distillation
with torch.no_grad():
sim = compute_cos_sim(hidden, prototypes)
prev_sim = sim[:, :prev_logits.shape[1]] # [batch_size, prev_rel_num]
prev_sim = F.softmax(prev_sim / args.kd_temp, dim=1)
prob = F.softmax(logits, dim=1)
focal_weight = 1.0 - torch.gather(prob, dim=1, index=labels.unsqueeze(1)).squeeze()
focal_weight = focal_weight ** args.gamma
source = logits.narrow(1, 0, prev_logits.shape[1])
source = F.log_softmax(source, dim=1)
target = F.softmax(prev_logits, dim=1)
target = target * prev_sim + 1e-8
kdloss = torch.sum(-source * target, dim=1)
kdloss = torch.mean(kdloss * focal_weight)
rep_loss = celoss + args.kd_lambda1*kdloss
return rep_loss
def evaluate(args, model, classifier, valdata, rel2id, proto_features=None):
model.eval()
dataloader = DataLoader(valdata, batch_size=args.test_batch_size, collate_fn=collate_fn, drop_last=False)
pred_labels, golden_labels = [], []
for i, batch in enumerate(tqdm(dataloader)):
inputs = {
'input_ids': batch[0].to(args.device),
'attention_mask': batch[1].to(args.device),
'h_index': batch[2].to(args.device),
't_index': batch[3].to(args.device),
}
with torch.no_grad():
hidden, feature = model(**inputs)
logits = classifier(hidden)[0]
prob_cls = F.softmax(logits, dim=1)
if proto_features is not None:
logits = torch.mm(feature, proto_features.T) / args.cl_temp
prob_ncm = F.softmax(logits, dim=1)
final_prob = args.alpha*prob_cls + (1-args.alpha)*prob_ncm
else:
final_prob = prob_cls
# get pred_labels
pred_labels += torch.argmax(final_prob, dim=1).cpu().tolist()
golden_labels += batch[4].tolist()
pred_labels = torch.tensor(pred_labels, dtype=torch.long)
golden_labels = torch.tensor(golden_labels, dtype=torch.long)
acc = float(torch.sum(pred_labels==golden_labels).item()) / float(len(golden_labels))
return acc
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--data_dir", default="data", type=str)
parser.add_argument("--checkpoint_dir", default="checkpoint", type=str)
parser.add_argument("--dataset_name", default="FewRel", type=str)
parser.add_argument("--cuda", default=True, type=bool)
parser.add_argument("--cuda_device", default=3, type=int)
parser.add_argument("--plm_name", default="bert-base-uncased", type=str)
parser.add_argument("--train_batch_size", default=16, type=int)
parser.add_argument("--test_batch_size", default=128, type=int)
parser.add_argument("--epoch_num_task", default=10, type=int, help="Max training epochs.")
parser.add_argument("--epoch_num_memory", default=10, type=int, help="Max training epochs.")
    parser.add_argument("--hidden_dim", default=768, type=int, help="Output dimension of encoder.")
parser.add_argument("--feature_dim", default=64, type=int, help="Output dimension of projection head.")
parser.add_argument("--encoder_lr", default=1e-5, type=float, help="The initial learning rate of encoder for AdamW.")
parser.add_argument("--classifier_lr", default=1e-3, type=float, help="The initial learning rate of classifier for AdamW.")
parser.add_argument("--adam_epsilon", default=1e-6, type=float, help="Epsilon for Adam optimizer.")
parser.add_argument("--alpha", default=0.5, type=float, help="Bagging Hyperparameter.")
parser.add_argument("--beta", default=0.5, type=float, help="Prototype weight.")
parser.add_argument("--cl_temp", default=0.1, type=float, help="Temperature for contrastive learning.")
parser.add_argument("--cl_lambda", default=0.5, type=float, help="Hyperparameter for contrastive learning.")
parser.add_argument("--margin", default=0.1, type=float, help="Hyperparameter for margin loss.")
parser.add_argument("--kd_temp", default=0.5, type=float, help="Temperature for knowledge distillation.")
parser.add_argument("--kd_lambda1", default=1.1, type=float, help="Hyperparameter for knowledge distillation.")
parser.add_argument("--kd_lambda2", default=0.5, type=float, help="Hyperparameter for knowledge distillation.")
parser.add_argument("--gamma", default=1.25, type=float, help="Hyperparameter of focal loss.")
parser.add_argument("--encode_style", default="emarker", type=str, help="Encode style of encoder.")
parser.add_argument("--experiment_num", default=5, type=int)
parser.add_argument("--seed", default=2022, type=int)
parser.add_argument("--set_task_order", default=True, type=bool)
parser.add_argument("--read_from_task_order", default=True, type=bool)
parser.add_argument("--task_num", default=10, type=int)
parser.add_argument("--memory_size", default=10, type=int, help="Memory size for each relation.")
parser.add_argument("--early_stop_patient", default=10, type=int)
args = parser.parse_args()
if args.cuda:
device = "cuda:"+str(args.cuda_device)
else:
device = "cpu"
args.device = device
args.collate_fn = collate_fn
tuner_params = nni.get_next_parameter()
args = merge_parameter(args, tuner_params)
tokenizer = BertTokenizer.from_pretrained(args.plm_name, additional_special_tokens=["[E11]", "[E12]", "[E21]", "[E22]"])
s = time.time()
task_results, memory_results = [], []
for i in range(args.experiment_num):
set_random_seed(args)
if args.dataset_name == "FewRel":
processor = FewRelProcessor(args, tokenizer)
else:
processor = tacredProcessor(args, tokenizer)
if args.set_task_order:
processor.set_task_order("task_order.json", i)
if args.read_from_task_order:
processor.set_read_from_order(i)
task_acc, memory_acc = do_train(args, tokenizer, processor)
print(f'{green_print}Result of experiment {i}:')
print(f'task acc: {task_acc}')
print(f'memory acc: {memory_acc}')
print(f'Average: {sum(memory_acc)/len(memory_acc)}{default_print}')
task_results.append(task_acc)
memory_results.append(memory_acc)
# torch.cuda.empty_cache()
e = time.time()
task_results = torch.tensor(task_results, dtype=torch.float32)
memory_results = torch.tensor(memory_results, dtype=torch.float32)
print(f'All task result: {task_results.tolist()}')
print(f'All memory result: {memory_results.tolist()}')
task_results = torch.mean(task_results, dim=0).tolist()
memory_results = torch.mean(memory_results, dim=0)
final_average = torch.mean(memory_results).item()
print(f'Final task result: {task_results}')
print(f'Final memory result: {memory_results.tolist()}')
print(f'Final average: {final_average}')
print(f'Time cost: {e-s}s.')
nni.report_final_result(final_average) | 18,878 | 43.95 | 206 | py |
conditional_kde | conditional_kde-main/setup.py | #!/usr/bin/env python
"""The setup script."""
from setuptools import setup, find_packages
with open('README.rst') as readme_file:
readme = readme_file.read()
with open('HISTORY.rst') as history_file:
history = history_file.read()
requirements = ["numpy", "scipy", "scikit-learn"]
test_requirements = ['pytest>=3', ]
setup(
author="David Prelogović",
author_email='david.prelogovic@gmail.com',
python_requires='>=3.6',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
description="Conditional Kernel Density Estimation.",
install_requires=requirements,
license="MIT license",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='conditional_kde',
name='conditional_kde',
packages=find_packages(include=['conditional_kde', 'conditional_kde.*']),
test_suite='tests',
tests_require=test_requirements,
url='https://github.com/dprelogo/conditional_kde',
version='0.1.0',
zip_safe=False,
)
| 1,339 | 28.777778 | 77 | py |
conditional_kde | conditional_kde-main/conditional_kde/gaussian.py | """Module containing Gaussian versions of the Conditional KDE."""
import numpy as np
from scipy.optimize import minimize_scalar
from scipy.special import logsumexp
from sklearn.neighbors import KernelDensity
from sklearn.model_selection import train_test_split, GridSearchCV
from .util import DataWhitener
class ConditionalGaussian:
"""Conditional Gaussian. Makes a simple Gaussian fit to the data, allowing for conditioning.
Args:
bandwidth (float): allows for the additional smoothing/shrinking of the covariance.
In most cases, it should be left as 1.
"""
def __init__(
self,
bandwidth=1.0,
):
if not isinstance(bandwidth, (int, float)):
raise ValueError("Bandwidth should be a number.")
self.bandwidth = bandwidth
self.features = None # names of features
self.dw = None # data whitener
@staticmethod
def _log_prob(X, mean, cov, add_norm=True):
"""Log probability of a gaussian KDE distribution.
Args:
X (array): array of samples for which probability is calculated.
Of shape `(n, n_features)`.
mean (array): mean of a gaussian distribution.
cov (float, array): covariance matrix of a gaussian distribution.
If float, it is a variance shared for all features.
If 1D array, it is a variance for every feature separately.
if 2D array, it is a full covariance matrix.
add_norm (bool): either to add normalization factor to the calculation or not.
Returns:
Log probabilities.
"""
n_features = mean.shape[-1]
X = np.atleast_2d(X)
if X.shape[-1] != n_features:
raise ValueError("`n_features` of samples should be the same as the mean.")
if not isinstance(cov, (int, float, np.ndarray)):
raise TypeError(
f"`cov` should be a number or `numpy.ndarray`, "
f"but is {type(cov).__name__}"
)
if isinstance(cov, (int, float)):
Σ_inv = np.identity(n_features, dtype=np.float32) / cov
Σ_det = cov**n_features
elif isinstance(cov, np.ndarray):
if len(cov.shape) == 1:
if len(cov) != n_features:
raise ValueError("`cov` should be of length `n_features`.")
Σ_inv = np.diag(1 / cov)
Σ_det = np.prod(cov)
elif len(cov.shape) == 2:
if cov.shape != (n_features, n_features):
raise ValueError(
"`cov` should be of shape `(n_features, n_features)`."
)
Σ_inv = np.linalg.inv(cov)
Σ_det = np.linalg.det(cov)
else:
raise ValueError(
"Dimensionality of a covariance matrix cannot be larger than 2."
)
delta = X - mean
log_prob = -0.5 * np.einsum("ij,jk,ik->i", delta, Σ_inv, delta)
if add_norm:
norm = 0.5 * n_features * np.log(2 * np.pi) + 0.5 * np.log(Σ_det)
return log_prob - norm
else:
return log_prob
@staticmethod
def _covariance_decomposition(cov, cond_mask, cond_only=False):
"""Decomposing covariance matrix into the unconditional, conditional and cross terms.
Args:
cov (array): covariance matrix.
cond_mask (array): boolean array defining conditional dimensions.
cond_only (bool): to return only conditional matrix or all decompositions.
Returns:
If `cond_only is True`, only conditional part of the covariance,
otherwise: conditional, unconditional and cross parts, respectively.
"""
if len(cov) != len(cond_mask):
raise ValueError(
"Dimensionality of `cov` and `cond_mask` should be the same."
)
if len(cov.shape) != 2 or cov.shape[0] != cov.shape[-1]:
raise ValueError("`cov` should be 2D square matrix.")
mask_cond = np.outer(cond_mask, cond_mask)
shape_cond = (sum(cond_mask), sum(cond_mask))
if cond_only:
return cov[mask_cond].reshape(shape_cond)
else:
mask_uncond = np.outer(~cond_mask, ~cond_mask)
shape_uncond = (sum(~cond_mask), sum(~cond_mask))
mask_cross = np.outer(cond_mask, ~cond_mask)
shape_cross = (sum(cond_mask), sum(~cond_mask))
return (
cov[mask_cond].reshape(shape_cond),
cov[mask_uncond].reshape(shape_uncond),
cov[mask_cross].reshape(shape_cross),
)
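    # The decomposition above feeds the standard conditional-Gaussian identities
    # used in `sample` below: writing the covariance in blocks over conditional (c)
    # and unconditional (u) dimensions, the unconditioned part given x_c is Gaussian
    # with mean μ_u + (x_c - μ_c) Σ_cc^{-1} Σ_cu and covariance
    # Σ_uu - Σ_uc Σ_cc^{-1} Σ_cu, which is exactly what `corr_μ` and `corr_Σ` compute.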
def fit(
self,
X,
weights=None,
features=None,
):
"""Fitting the Conditional Kernel Density.
Args:
X (array): data of shape `(n_samples, n_features)`.
weights (array): weights of every sample, of shape `(n_samples)`.
features (list): optional, list defining names for every feature.
It's used for referencing conditional dimensions.
Defaults to `[0, 1, ..., n_features - 1]`.
Returns:
An instance of itself.
"""
n_samples, n_features = X.shape
if features is None:
self.features = list(range(n_features))
else:
if not isinstance(features, list):
raise TypeError("`features` should be a `list`.")
elif n_features != len(features):
raise ValueError(
f"`n_features` ({n_features}) should be equal to "
f"the length of `features` ({len(features)})."
)
elif len(features) != len(set(features)):
raise ValueError("All `features` should be unique.")
self.features = features
self.dw = DataWhitener("ZCA")
self.dw.fit(X, weights, save_data=False)
return self
def score_samples(self, X, conditional_features=None):
"""Compute the (un)conditional log-probability of each sample under the model.
Args:
X (array): data of shape `(n, n_features)`.
Last dimension should match dimension of training data `(n_features)`.
conditional_features (list): subset of `self.features`, which dimensions of data
to condition upon. Defaults to `None`, meaning unconditional log-probability.
Returns:
Conditional log probability for each sample in `X`.
"""
if conditional_features is None:
X = np.atleast_2d(X)
return self._log_prob(X, self.dw.μ, self.dw.Σ * self.bandwidth**2)
else:
if not all(k in self.features for k in conditional_features):
raise ValueError(
"All conditional_features should be in features. If you haven't "
"specified features, pick integers from `[0, 1, ..., n_features - 1]`."
)
            if len(conditional_features) == len(self.features):
raise ValueError(
"Doesn't make much sense to condition on all features. "
"Probability of that is 1."
)
cond_variables = [
True if f in conditional_features else False for f in self.features
]
cond_variables = np.array(cond_variables, dtype=bool)
X = np.atleast_2d(X)
p_full = self._log_prob(X, self.dw.μ, self.dw.Σ * self.bandwidth**2)
Σ_marginal = self._covariance_decomposition(
self.dw.Σ * self.bandwidth**2, cond_variables, cond_only=True
)
p_marginal = self._log_prob(
X[:, cond_variables],
self.dw.μ[:, cond_variables],
Σ_marginal,
)
return p_full - p_marginal
def _check_conditionals(self, conditionals, n_samples):
if not isinstance(conditionals, dict):
raise TypeError(
"`conditional_features` should be dictionary, but is "
f"{type(conditionals).__name__}."
)
if not all(k in self.features for k in conditionals.keys()):
raise ValueError(
"All conditionals should be in features. If you haven't "
"specified features, pick integers from `[0, 1, ..., n_features - 1]`."
)
if len(conditionals) == len(self.features):
raise ValueError("One cannot condition on all features.")
if any(not isinstance(v, (float, int)) for v in conditionals.values()):
if not all(isinstance(v, np.ndarray) for v in conditionals.values()):
raise ValueError(
"For vectorized conditionals, all should be `np.ndarray`."
)
if not all(v.ndim == 1 for v in conditionals.values()):
raise ValueError("For vectorized conditionals, all should be 1D.")
lengths = [len(v) for v in conditionals.values()]
if not all(l == lengths[0] for l in lengths):
raise ValueError(
"For vectorized conditionals, all should have the same length."
)
vectorized_conditionals = True
n_samples = lengths[0]
else:
vectorized_conditionals = False
return vectorized_conditionals, n_samples
def sample(
self,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model. There are two modes
of sampling:
(1) specify conditionals as scalar values and sample `n_samples` out of distribution.
(2) specify conditionals as an array, where the number of samples will be the length of an array.
Args:
conditionals (dict): desired variables (features) to condition upon.
Dictionary keys should be only feature names from `features`.
For example, if `self.features == ["a", "b", "c"]` and one would like to condition
on "a" and "c", then `conditionals = {"a": cond_val_a, "c": cond_val_c}`.
Conditioned values can be either `float` or `array`, where in the case of the
latter, all conditioned arrays have to be of the same size.
Defaults to `None`, i.e. normal KDE.
n_samples (int): number of samples to generate. Ignored in the case
conditional arrays have been passed in `conditionals`. Defaults to 1.
random_state (np.random.RandomState, int): seed or `RandomState` instance, optional.
Determines random number generation used to generate
random samples. See `Glossary <random_state>`.
keep_dims (bool): whether to return non-conditioned dimensions only
or keep given conditional values. Defaults to `False`.
Returns:
            Array of samples, of shape `(n_samples, n_features)` if `conditionals is None`,
or `(n_samples, n_features - len(conditionals))` otherwise.
"""
if isinstance(random_state, np.random.RandomState):
rs = random_state
elif random_state is None or isinstance(random_state, int):
rs = np.random.RandomState(seed=random_state)
else:
raise TypeError("`random_state` should be `int` or `RandomState`.")
if conditionals is None:
return rs.multivariate_normal(
self.dw.μ.flatten(), self.dw.Σ * self.bandwidth**2, n_samples
)
else:
vectorized_conditionals, n_samples = self._check_conditionals(
conditionals, n_samples
)
# helping variables for conditionals
cond_variables = [
True if f in conditionals.keys() else False for f in self.features
]
cond_values = [
conditionals[f] for f in self.features if f in conditionals.keys()
]
cond_variables = np.array(cond_variables, dtype=bool)
if vectorized_conditionals:
cond_values = np.stack(cond_values, axis=-1).astype(np.float32)
else:
cond_values = np.array(cond_values, dtype=np.float32)
            # decomposing the covariance
Σ_cond, Σ_uncond, Σ_cross = self._covariance_decomposition(
self.dw.Σ * self.bandwidth**2, cond_variables
)
# distribution is defined from corrected unconditional part
Σ_cond_inv = np.linalg.inv(Σ_cond)
corr_Σ = Σ_uncond - Σ_cross.T @ Σ_cond_inv @ Σ_cross
corr_μ = (
self.dw.μ[:, ~cond_variables]
+ (cond_values - self.dw.μ[:, cond_variables]) @ Σ_cond_inv @ Σ_cross
)
sample = np.empty((n_samples, len(self.features)))
sample[:, ~cond_variables] = (
rs.multivariate_normal(np.zeros(len(corr_Σ)), corr_Σ, n_samples)
+ corr_μ
)
if vectorized_conditionals:
sample[:, cond_variables] = cond_values
else:
sample[:, cond_variables] = np.broadcast_to(
cond_values, (n_samples, len(cond_values))
)
if keep_dims:
return sample
else:
return sample[:, ~cond_variables]
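# A minimal usage sketch of `ConditionalGaussian` (illustrative only): the data,
# feature names and conditioned values below are hypothetical.
def _example_conditional_gaussian():
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal(
        mean=[0.0, 1.0, -1.0],
        cov=[[1.0, 0.5, 0.0], [0.5, 2.0, 0.3], [0.0, 0.3, 1.5]],
        size=1000,
    )
    cg = ConditionalGaussian().fit(X, features=["a", "b", "c"])
    # log p(a, b | c) evaluated for a few full-dimensional samples
    log_p = cg.score_samples(X[:5], conditional_features=["c"])
    # draw 10 samples of (a, b) conditioned on c = 0.2
    samples = cg.sample(conditionals={"c": 0.2}, n_samples=10, random_state=0)
    return log_p, samples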
class ConditionalGaussianKernelDensity:
"""Conditional Kernel Density estimator.
Args:
whitening_algorithm (str): data whitening algorithm, either `None`, "rescale" or "ZCA".
See `util.DataWhitener` for more details. "rescale" by default.
bandwidth (str, float): the width of the Gaussian centered around every point.
It can be either:\n
(1) "scott", using Scott's parameter,\n
(2) "optimized", which minimizes cross entropy to find the optimal bandwidth, or\n
(3) `float`, specifying the actual value.\n
By default, it uses Scott's parameter.
**kwargs: additional kwargs used in the case of "optimized" bandwidth.
steps (int): how many steps to use in optimization, 10 by default.\n
cv_fold (int): cross validation fold, 5 by default.\n
n_jobs (int): number of jobs to run cross validation in parallel,
-1 by default, i.e. using all available processors.\n
verbose (int): verbosity of the cross validation run,
for more details see `sklearn.model_selection.GridSearchCV`.
"""
def __init__(
self,
whitening_algorithm="rescale",
bandwidth="scott",
**kwargs,
):
if whitening_algorithm not in [None, "rescale", "ZCA"]:
raise ValueError(
f"Whitening lgorithm should be None, rescale or ZCA, but is {whitening_algorithm}."
)
self.algorithm = whitening_algorithm
if not isinstance(bandwidth, (int, float)):
if not bandwidth in ["scott", "optimized"]:
raise ValueError(
f"""Bandwidth should be a number, "scott" or "optimized",
but has value {bandwidth} and type {type(bandwidth).__name__}."""
)
self.bandwidth = bandwidth
if self.bandwidth == "optimized":
self.bandwidth_kwargs = {
"steps": kwargs.get("steps", 10),
"cv_fold": kwargs.get("cv_fold", 5),
"n_jobs": kwargs.get("n_jobs", -1),
"verbose": kwargs.get("verbose", 0),
}
else:
self.bandwidth_kwargs = {}
self.features = None # names of features
self.dw = None # data whitener
@staticmethod
def log_scott(n_samples, n_features):
"""Scott's parameter."""
return -1 / (n_features + 4) * np.log10(n_samples)
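    # Worked example of the rule above: for n_samples = 10_000 and n_features = 4,
    # log10(h) = -log10(10_000) / (4 + 4) = -0.5, i.e. h = 10**-0.5 ≈ 0.316
    # in the whitened space (the conversion to a bandwidth happens in `fit`).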
@staticmethod
def _log_prob(X, data, cov, add_norm=True):
"""Log probability of a gaussian KDE distribution.
Args:
X (array): array of samples for which probability is calculated.
Of shape `(n, n_features)`.
data (array): KDE data, of shape `(n_samples, n_features)`.
cov (float, array): covariance matrix of a gaussian distribution.
If float, it is a variance shared for all features.
If 1D array, it is a variance for every feature separately.
if 2D array, it is a full covariance matrix.
add_norm (bool): either to add normalization factor to the calculation or not.
Returns:
Log probabilities.
"""
n_samples, n_features = data.shape
X = np.atleast_2d(X)
if X.shape[-1] != n_features:
raise ValueError("`n_features` of both arrays should be the same.")
if not isinstance(cov, (int, float, np.ndarray)):
raise TypeError(
f"`cov` should be a number or `numpy.ndarray`, "
f"but is {type(cov).__name__}"
)
if isinstance(cov, (int, float)):
Σ_inv = np.identity(n_features, dtype=np.float32) / cov
Σ_det = cov**n_features
elif isinstance(cov, np.ndarray):
if len(cov.shape) == 1:
if len(cov) != n_features:
raise ValueError("`cov` should be of length `n_features`.")
Σ_inv = np.diag(1 / cov)
Σ_det = np.prod(cov)
elif len(cov.shape) == 2:
if cov.shape != (n_features, n_features):
raise ValueError(
"`cov` should be of shape `(n_features, n_features)`."
)
Σ_inv = np.linalg.inv(cov)
Σ_det = np.linalg.det(cov)
else:
raise ValueError(
"Dimensionality of a covariance matrix cannot be larger than 2."
)
def calculate_log_prob(x):
delta = x - data
res = -0.5 * np.einsum("ij,jk,ik->i", delta, Σ_inv, delta)
return logsumexp(res)
log_prob = np.apply_along_axis(calculate_log_prob, 1, X)
if add_norm:
norm = (
np.log(n_samples)
+ 0.5 * n_features * np.log(2 * np.pi)
+ 0.5 * np.log(Σ_det)
)
return log_prob - norm
else:
return log_prob
@staticmethod
def _covariance_decomposition(cov, cond_mask, cond_only=False):
"""Decomposing covariance matrix into the unconditional, conditional and cross terms.
Args:
cov (array): covariance matrix.
cond_mask (array): boolean array defining conditional dimensions.
cond_only (bool): to return only conditional matrix or all decompositions.
Returns:
If `cond_only is True`, only conditional part of the covariance,
otherwise: conditional, unconditional and cross parts, respectively.
"""
if len(cov) != len(cond_mask):
raise ValueError(
"Dimensionality of `cov` and `cond_mask` should be the same."
)
if len(cov.shape) != 2 or cov.shape[0] != cov.shape[-1]:
raise ValueError("`cov` should be 2D square matrix.")
mask_cond = np.outer(cond_mask, cond_mask)
shape_cond = (sum(cond_mask), sum(cond_mask))
if cond_only:
return cov[mask_cond].reshape(shape_cond)
else:
mask_uncond = np.outer(~cond_mask, ~cond_mask)
shape_uncond = (sum(~cond_mask), sum(~cond_mask))
mask_cross = np.outer(cond_mask, ~cond_mask)
shape_cross = (sum(cond_mask), sum(~cond_mask))
return (
cov[mask_cond].reshape(shape_cond),
cov[mask_uncond].reshape(shape_uncond),
cov[mask_cross].reshape(shape_cross),
)
@staticmethod
def _conditional_weights(
conditional_values, conditional_data, cov, optimize_memory=False
):
"""Weights for the sampling from the conditional distribution.
They amount to the conditioned part of the gaussian for every data point.
Args:
            conditional_values (array): of length `n_conditionals`, or of shape
                `(n, n_conditionals)` for vectorized conditionals.
            conditional_data (array): of shape `(n_samples, n_conditionals)`.
Here non-conditional dimensions are already removed.
cov (float, array): covariance matrix.
If float, it is a variance shared for all features.
If 1D array, it is a variance for every feature separately.
if 2D array, it is a full covariance matrix.
optimize_memory (bool): only for the vectorized conditionals, it makes an
effort to minimize memory footprint, and enlarges computational time.
Returns:
Normalized weights.
"""
if conditional_values.ndim == 1:
delta = conditional_values - conditional_data
if isinstance(cov, float):
log_weights = -0.5 / cov * np.einsum("ij,ij->i", delta, delta)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 1:
log_weights = -0.5 * np.einsum("ij,j,ij->i", delta, 1 / cov, delta)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 2:
cov_inv = np.linalg.inv(cov)
log_weights = -0.5 * np.einsum("ij,jk,ik->i", delta, cov_inv, delta)
else:
raise ValueError("`cov` cannot be more than 2D.")
# calculate exp(weights) in a more stable way
log_weights_sum = logsumexp(log_weights)
else:
if optimize_memory:
if isinstance(cov, float):
log_weights = (
-0.5
/ cov
* (
np.einsum(
"ij,ij->i", conditional_values, conditional_values
)[:, np.newaxis]
+ np.einsum("ij,ij->i", conditional_data, conditional_data)[
np.newaxis, :
]
- 2
* np.einsum(
"ij,kj->ik", conditional_values, conditional_data
)
)
)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 1:
log_weights = (
-0.5
* np.einsum(
"ij,j,ij->i",
conditional_values,
1 / cov,
conditional_values,
)[:, np.newaxis]
- 0.5
* np.einsum(
"ij,j,ij->i", conditional_data, 1 / cov, conditional_data
)[np.newaxis, :]
+ np.einsum(
"ij,j,kj->ik", conditional_values, 1 / cov, conditional_data
)
)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 2:
cov_inv = np.linalg.inv(cov)
log_weights = (
-0.5
* np.einsum(
"ij,jk,ik->i",
conditional_values,
cov_inv,
conditional_values,
)[:, np.newaxis]
- 0.5
* np.einsum(
"ij,jk,ik->i", conditional_data, cov_inv, conditional_data
)[np.newaxis, :]
+ np.einsum(
"ij,jk,lk->il",
conditional_values,
cov_inv,
conditional_data,
)
)
else:
raise ValueError("`cov` cannot be more than 2D.")
else:
delta = (
conditional_values[:, np.newaxis, :]
- conditional_data[np.newaxis, ...]
)
if isinstance(cov, float):
log_weights = -0.5 / cov * np.einsum("ijk,ijk->ij", delta, delta)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 1:
log_weights = -0.5 * np.einsum(
"ijk,k,ijk->ij", delta, 1 / cov, delta
)
elif isinstance(cov, np.ndarray) and len(cov.shape) == 2:
cov_inv = np.linalg.inv(cov)
log_weights = -0.5 * np.einsum(
"ijk,kl,ijl->ij", delta, cov_inv, delta
)
else:
raise ValueError("`cov` cannot be more than 2D.")
log_weights_sum = logsumexp(log_weights, axis=1, keepdims=True)
log_weights -= log_weights_sum
mask = log_weights < -22
weights = np.exp(log_weights)
weights[mask] = 0.0
if conditional_values.ndim == 1:
return weights / np.sum(weights)
else:
return weights / np.sum(weights, axis=1, keepdims=True)
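    # In short: every training point i gets a weight proportional to its kernel
    # evaluated at the conditioned values,
    #     w_i ∝ exp(-0.5 (x_c - d_i,c)^T Σ_cc^{-1} (x_c - d_i,c)),
    # normalized to sum to one; during conditional sampling these weights act as
    # the categorical distribution from which a kernel centre is drawn.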
def fit(
self,
X,
features=None,
):
"""Fitting the Conditional Kernel Density.
Args:
X (array): data of shape `(n_samples, n_features)`.
features (list): optional, list defining names for every feature.
It's used for referencing conditional dimensions.
Defaults to `[0, 1, ..., n_features - 1]`.
Returns:
An instance of itself.
"""
n_samples, n_features = X.shape
if features is None:
self.features = list(range(n_features))
else:
if not isinstance(features, list):
raise TypeError("`features` should be a `list`.")
elif n_features != len(features):
raise ValueError(
f"`n_features` ({n_features}) should be equal to "
f"the length of `features` ({len(features)})."
)
elif len(features) != len(set(features)):
raise ValueError("All `features` should be unique.")
self.features = features
self.dw = DataWhitener(self.algorithm)
self.dw.fit(X, save_data=True)
if self.bandwidth == "scott":
self.bandwidth = 10 ** self.log_scott(n_samples, n_features)
elif self.bandwidth == "optimized":
log_scott = self.log_scott(n_samples, n_features)
model = GridSearchCV(
KernelDensity(),
{
"bandwidth": np.logspace(
log_scott - 1, log_scott + 1, num=self.bandwidth_kwargs["steps"]
)
},
cv=self.bandwidth_kwargs["cv_fold"],
n_jobs=self.bandwidth_kwargs["n_jobs"],
verbose=self.bandwidth_kwargs["verbose"],
)
model.fit(self.dw.whitened_data)
self.bandwidth = model.best_params_["bandwidth"]
return self
def score_samples(self, X, conditional_features=None):
"""Compute the (un)conditional log-probability of each sample under the model.
Args:
X (array): data of shape `(n, n_features)`.
Last dimension should match dimension of training data `(n_features)`.
conditional_features (list): subset of `self.features`, which dimensions of data
to condition upon. Defaults to `None`, meaning unconditional log-probability.
Returns:
Conditional log probability for each sample in `X`.
"""
if conditional_features is None:
X = np.atleast_2d(X)
return self._log_prob(X, self.dw.data, self.dw.Σ * self.bandwidth**2)
else:
if not all(k in self.features for k in conditional_features):
raise ValueError(
"All conditional_features should be in features. If you haven't "
"specified features, pick integers from `[0, 1, ..., n_features - 1]`."
)
            if len(conditional_features) == len(self.features):
raise ValueError(
"Doesn't make much sense to condition on all features. "
"Probability of that is 1."
)
cond_variables = [
True if f in conditional_features else False for f in self.features
]
cond_variables = np.array(cond_variables, dtype=bool)
X = np.atleast_2d(X)
p_full = self._log_prob(X, self.dw.data, self.dw.Σ * self.bandwidth**2)
Σ_marginal = self._covariance_decomposition(
self.dw.Σ * self.bandwidth**2, cond_variables, cond_only=True
)
p_marginal = self._log_prob(
X[:, cond_variables],
self.dw.data[:, cond_variables],
Σ_marginal,
)
return p_full - p_marginal
def _check_conditionals(self, conditionals, n_samples):
if not isinstance(conditionals, dict):
raise TypeError(
"`conditional_features` should be dictionary, but is "
f"{type(conditionals).__name__}."
)
if not all(k in self.features for k in conditionals.keys()):
raise ValueError(
"All conditionals should be in features. If you haven't "
"specified features, pick integers from `[0, 1, ..., n_features - 1]`."
)
if len(conditionals) == len(self.features):
raise ValueError("One cannot condition on all features.")
if any(not isinstance(v, (float, int)) for v in conditionals.values()):
if not all(isinstance(v, np.ndarray) for v in conditionals.values()):
raise ValueError(
"For vectorized conditionals, all should be `np.ndarray`."
)
if not all(v.ndim == 1 for v in conditionals.values()):
raise ValueError("For vectorized conditionals, all should be 1D.")
lengths = [len(v) for v in conditionals.values()]
if not all(l == lengths[0] for l in lengths):
raise ValueError(
"For vectorized conditionals, all should have the same length."
)
vectorized_conditionals = True
n_samples = lengths[0]
else:
vectorized_conditionals = False
return vectorized_conditionals, n_samples
def _sample(
self,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model.
Here there is an assumption that all dimensions have not been distorted,
but only rescaled. In other words, it works for `None` and "rescale"
whitening algorithms, but not for "ZCA".
"""
data = self.dw.whitened_data
if isinstance(random_state, np.random.RandomState):
rs = random_state
elif random_state is None or isinstance(random_state, int):
rs = np.random.RandomState(seed=random_state)
else:
raise TypeError("`random_state` should be `int` or `RandomState`.")
if conditionals is None:
idx = rs.choice(data.shape[0], n_samples)
sample = rs.normal(np.atleast_2d(data[idx]), self.bandwidth)
return self.dw.unwhiten(sample)
else:
vectorized_conditionals, n_samples = self._check_conditionals(
conditionals, n_samples
)
# scaling conditional variables
cond_variables = [
True if f in conditionals.keys() else False for f in self.features
]
cond_variables = np.array(cond_variables, dtype=bool)
if vectorized_conditionals:
cond_values = np.zeros(
(n_samples, len(self.features)), dtype=np.float32
)
for i, f in enumerate(self.features):
if f in conditionals.keys():
cond_values[:, i] = conditionals[f]
cond_values = self.dw.whiten(cond_values)[:, cond_variables]
else:
cond_values = np.zeros(len(self.features), dtype=np.float32)
for i, f in enumerate(self.features):
if f in conditionals.keys():
cond_values[i] = conditionals[f]
cond_values = self.dw.whiten(cond_values)[cond_variables]
weights = self._conditional_weights(
cond_values, data[:, cond_variables], self.bandwidth**2
)
if vectorized_conditionals:
# idx = np.apply_along_axis(
# lambda x: rs.choice(data.shape[0], p=x),
# 1,
# weights,
# )
unit_sample = rs.uniform(size=n_samples)[:, np.newaxis]
idx = np.argmax(np.cumsum(weights, axis=1) > unit_sample, axis=1)
else:
idx = rs.choice(data.shape[0], n_samples, p=weights)
# pulling the samples
sample = np.empty((n_samples, len(self.features)))
sample[:, ~cond_variables] = rs.normal(
np.atleast_2d(data[idx])[:, ~cond_variables], self.bandwidth
)
if vectorized_conditionals:
sample[:, cond_variables] = cond_values
else:
sample[:, cond_variables] = np.broadcast_to(
cond_values, (n_samples, len(cond_values))
)
sample = self.dw.unwhiten(sample)
if keep_dims:
return sample
else:
return sample[:, ~cond_variables]
def _sample_general(
self,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model.
This function is the most general sampler, without any assumptions.
It should be used for ZCA.
"""
if isinstance(random_state, np.random.RandomState):
rs = random_state
elif random_state is None or isinstance(random_state, int):
rs = np.random.RandomState(seed=random_state)
else:
raise TypeError("`random_state` should be `int` or `RandomState`.")
if conditionals is None:
idx = rs.choice(self.dw.data.shape[0], n_samples)
return rs.multivariate_normal(
np.zeros(len(self.dw.Σ)), self.dw.Σ * self.bandwidth**2, n_samples
) + np.atleast_2d(self.dw.data[idx])
else:
vectorized_conditionals, n_samples = self._check_conditionals(
conditionals, n_samples
)
# helping variables for conditionals
cond_variables = [
True if f in conditionals.keys() else False for f in self.features
]
cond_values = [
conditionals[f] for f in self.features if f in conditionals.keys()
]
cond_variables = np.array(cond_variables, dtype=bool)
if vectorized_conditionals:
cond_values = np.stack(cond_values, axis=-1).astype(np.float32)
else:
cond_values = np.array(cond_values, dtype=np.float32)
            # decomposing the covariance
Σ_cond, Σ_uncond, Σ_cross = self._covariance_decomposition(
self.dw.Σ * self.bandwidth**2, cond_variables
)
# weights are defined from conditional part
weights = self._conditional_weights(
cond_values,
self.dw.data[:, cond_variables],
Σ_cond,
)
if vectorized_conditionals:
# idx = np.apply_along_axis(
# lambda x: rs.choice(self.dw.data.shape[0], p=x),
# 1,
# weights,
# )
unit_sample = rs.uniform(size=n_samples)[:, np.newaxis]
idx = np.argmax(np.cumsum(weights, axis=1) > unit_sample, axis=1)
else:
idx = rs.choice(self.dw.data.shape[0], n_samples, p=weights)
selected_data = np.atleast_2d(self.dw.data[idx])
# distribution is defined from corrected unconditional part
Σ_cond_inv = np.linalg.inv(Σ_cond)
corr_Σ = Σ_uncond - Σ_cross.T @ Σ_cond_inv @ Σ_cross
corr_data = (
selected_data[:, ~cond_variables]
+ (cond_values - selected_data[:, cond_variables])
@ Σ_cond_inv
@ Σ_cross
)
sample = np.empty((n_samples, len(self.features)))
sample[:, ~cond_variables] = (
rs.multivariate_normal(np.zeros(len(corr_Σ)), corr_Σ, n_samples)
+ corr_data
)
if vectorized_conditionals:
sample[:, cond_variables] = cond_values
else:
sample[:, cond_variables] = np.broadcast_to(
cond_values, (n_samples, len(cond_values))
)
if keep_dims:
return sample
else:
return sample[:, ~cond_variables]
def sample(
self,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model. There are two modes
of sampling:
(1) specify conditionals as scalar values and sample `n_samples` out of distribution.
(2) specify conditionals as an array, where the number of samples will be the length of an array.
Args:
conditionals (dict): desired variables (features) to condition upon.
Dictionary keys should be only feature names from `features`.
For example, if `self.features == ["a", "b", "c"]` and one would like to condition
on "a" and "c", then `conditionals = {"a": cond_val_a, "c": cond_val_c}`.
Conditioned values can be either `float` or `array`, where in the case of the
latter, all conditioned arrays have to be of the same size.
Defaults to `None`, i.e. normal KDE.
n_samples (int): number of samples to generate. Ignored in the case
conditional arrays have been passed in `conditionals`. Defaults to 1.
random_state (np.random.RandomState, int): seed or `RandomState` instance, optional.
Determines random number generation used to generate
random samples. See `Glossary <random_state>`.
keep_dims (bool): whether to return non-conditioned dimensions only
or keep given conditional values. Defaults to `False`.
Returns:
            Array of samples, of shape `(n_samples, n_features)` if `conditionals is None`,
or `(n_samples, n_features - len(conditionals))` otherwise.
"""
if self.algorithm == "ZCA":
return self._sample_general(
conditionals=conditionals,
n_samples=n_samples,
random_state=random_state,
keep_dims=keep_dims,
)
else:
return self._sample(
conditionals=conditionals,
n_samples=n_samples,
random_state=random_state,
keep_dims=keep_dims,
)
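# A minimal usage sketch of `ConditionalGaussianKernelDensity` (illustrative only):
# the data and conditioned values below are hypothetical, and "scott" is simply
# the default bandwidth rule.
def _example_conditional_kde():
    rng = np.random.RandomState(42)
    X = rng.normal(size=(2000, 3))
    X[:, 2] += 0.8 * X[:, 0]  # introduce some correlation between features
    kde = ConditionalGaussianKernelDensity(
        whitening_algorithm="rescale", bandwidth="scott"
    ).fit(X, features=["a", "b", "c"])
    # log p(a, b | c) for the first few samples
    log_p = kde.score_samples(X[:5], conditional_features=["c"])
    # vectorized conditioning: one (a, b) sample per conditioned value of c
    samples = kde.sample(conditionals={"c": np.linspace(-1.0, 1.0, 100)}, random_state=0)
    return log_p, samples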
| 40,564 | 41.036269 | 105 | py |
conditional_kde | conditional_kde-main/conditional_kde/util.py | """Important utilities."""
import itertools
import numpy as np
from scipy.interpolate import RegularGridInterpolator
from scipy.interpolate.interpnd import _ndim_coords_from_arrays
class DataWhitener:
"""Whitening of the data.
Implements several algorithms, depending on the desired whitening properties.
Args:
algorithm (str): either `None`, `"center"`, `"rescale"`, `"PCA"` or `"ZCA"`.
**None** - leaves the data as is. **center** - calculates mean in
every dimension and removes it from the data. **rescale** - calculates
mean and standard deviation in each dimension and rescales it to zero-mean,
unit-variance. In the absence of high correlations between dimensions, this is often sufficient.
**PCA** - data is transformed into its PCA space and divided by the
standard deviation of each dimension. **ZCA** - equivalent to the `"PCA"`,
with additional step of rotating back to original space, i.e. the
orientation of space is preserved.
"""
def __init__(self, algorithm="rescale"):
if algorithm not in [None, "center", "rescale", "PCA", "ZCA"]:
raise ValueError("algorithm should be None, center, rescale, PCA or ZCA.")
self.algorithm = algorithm
self.μ = None # mean
self.Σ = None # covariance matrix
self.W = None # whitening matrix
self.WI = None # unwhitening matrix
def fit(self, X, weights=None, save_data=False):
"""Fitting the whitener on the data X.
Args:
X (array): of shape `(n_samples, n_dim)`.
weights (array): of shape `(n_samples,)`, weights of each sample in `X`.
save_data (bool): if `True`, saves the data and whitened data as
`self.data`, `self.whitened_data`.
Returns:
Whitened array.
"""
if weights is None:
weights = np.ones((len(X), 1), dtype=X.dtype)
else:
if len(weights) != len(X):
raise ValueError("Weights and X should be of the same length.")
weights = weights / np.sum(weights) * len(weights)
weights = weights.reshape(-1, 1)
if self.algorithm is None:
self.μ = np.zeros((1, X.shape[-1]), dtype=X.dtype)
self.Σ = np.identity(X.shape[-1], dtype=X.dtype)
else:
# self.μ = np.mean(X, axis=0, keepdims=True)
self.μ = np.sum(weights * X, axis=0, keepdims=True) / np.sum(weights)
dX = X - self.μ
self.Σ = np.einsum("ji,jk->ik", weights * dX, dX) / (np.sum(weights) - 1)
if self.algorithm == "rescale":
# self.Σ = np.diag(np.var(X, axis=0))
self.Σ = np.diag(np.diag(self.Σ))
elif self.algorithm in ["PCA", "ZCA"]:
# self.Σ = np.cov(X.T)
evals, evecs = np.linalg.eigh(self.Σ)
if self.algorithm is None:
self.W = np.identity(X.shape[-1], dtype=X.dtype)
self.WI = np.identity(X.shape[-1], dtype=X.dtype)
elif self.algorithm == "rescale":
self.W = np.diag(np.diag(self.Σ) ** (-1 / 2))
self.WI = np.diag(np.diag(self.Σ) ** (1 / 2))
elif self.algorithm == "PCA":
self.W = np.einsum("ij,kj->ik", np.diag(evals ** (-1 / 2)), evecs)
self.WI = np.einsum("ij,jk->ik", evecs, np.diag(evals ** (1 / 2)))
elif self.algorithm == "ZCA":
self.W = np.einsum("ij,jk,lk->il", evecs, np.diag(evals ** (-1 / 2)), evecs)
self.WI = np.einsum("ij,jk,lk->il", evecs, np.diag(evals ** (1 / 2)), evecs)
if save_data:
self.data = X
self.whitened_data = self.whiten(X)
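    # Summary of the matrices built above: for the rescale/PCA/ZCA algorithms `W`
    # maps centred data to the whitened space (identity covariance) and `WI` is
    # its inverse, so whiten(X) = (X - μ) Wᵀ and unwhiten(Z) = Z WIᵀ + μ.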
def whiten(self, X):
"""Whiten the data by making it unit covariance.
Args:
X (array): of shape `(n_samples, n_dims)`.
Data to whiten. `n_dims` has to be the same as self.data.
Returns:
Whitened data, of shape `(n_samples, n_dims)`.
"""
if self.algorithm is None:
return X
if len(X.shape) == 1:
X = np.expand_dims(X, axis=0)
squeeze = True
else:
squeeze = False
if self.algorithm == "center":
X_whitened = X - self.μ
else:
X_whitened = np.einsum("ij,kj->ki", self.W, X - self.μ)
return np.squeeze(X_whitened) if squeeze else X_whitened
def unwhiten(self, X_whitened):
"""Un-whiten the sample with whitening parameters from the data.
Args:
X_whitened (array): array_like, shape `(n_samples, n_dims)`
Sample of the data to un-whiten.
`n_dims` has to be the same as `self.data`.
Returns:
Unwhitened data, of shape `(n_samples, n_dims)`.
"""
if self.algorithm is None:
return X_whitened
if len(X_whitened.shape) == 1:
X_whitened = np.expand_dims(X_whitened, axis=0)
squeeze = True
else:
squeeze = False
if self.algorithm == "center":
X = X_whitened + self.μ
else:
X = np.einsum("ij,kj->ki", self.WI, X_whitened) + self.μ
return np.squeeze(X) if squeeze else X
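# A quick round-trip sketch of `DataWhitener` (illustrative only); the data below
# is hypothetical.
def _example_data_whitener():
    rng = np.random.RandomState(0)
    X = rng.multivariate_normal([1.0, -2.0], [[2.0, 0.8], [0.8, 1.0]], size=500)
    dw = DataWhitener("ZCA")
    dw.fit(X, save_data=True)
    Z = dw.whitened_data  # approximately zero mean and identity covariance
    X_back = dw.unwhiten(Z)  # inverts the whitening
    assert np.allclose(X, X_back)
    return Z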
class Interpolator(RegularGridInterpolator):
"""Regular grid interpolator.
Inherits from `scipy.interpolate.RegularGridInterpolator`.
The difference with respect to the original class is to make
weights and edges explicitly visible, for the more general usage case.
Args:
points (list): list of lists, where every sub-list defines the grid points.
values (array): array of function values to interpolate.
If not given, `Interpolator` won't return any values, only weights and edges.
method (str): either "linear" or "nearest", defining the interpolation method.
As the name says, "linear" returns linearly interpolated value
and "nearest" only the nearest value on the grid.
bounds_error (bool): either to raise an error if the point for which
interpolation is requested is out of the grid bounds.
fill_value (float): value to be filled for out of the grid points.
"""
def __init__(
self, points, values=None, method="linear", bounds_error=True, fill_value=np.nan
):
self.interpolate_values = False if values is None else True
if values is None:
values = np.zeros(tuple(len(p) for p in points), dtype=float)
super().__init__(
points,
values,
method=method,
bounds_error=bounds_error,
fill_value=fill_value,
)
def __call__(self, xi, method=None, return_aux=True):
"""Interpolation at coordinates.
If values were not passed during initialization, it doesn't interpolate
but returns grid coordinates and weights only.
Args:
xi (array): the coordinates to sample the gridded data at, of shape `(..., ndim)`.
method (str): The method of interpolation to perform.
Supported are "linear" and "nearest".
return_aux (bool): If `True`, return includes grid coordinates and weights.
Returns:
If function values were set during initialization, returns an array of interpolated values.
If `return_aux is True` it will further return grid coordinates for every item in `xi`.
In the case method is "nearest", this will be only one relevant coordinate per `xi` sample,
or multiple ones for "linear" method. For the latter, also weights of every coordinate will be returned.
"""
if self.interpolate_values is False and return_aux is False:
raise ValueError(
"Please either define `values` or set `return_aux` to `True`. "
"Otherwise there's nothing to compute."
)
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError(f"Method {method} is not defined")
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError(
f"The requested sample points xi have dimension "
f"{xi.shape[1]}, but this RegularGridInterpolator has "
f"dimension {ndim}"
)
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(
np.all(self.grid[i][0] <= p), np.all(p <= self.grid[i][-1])
):
raise ValueError(
f"One of the requested xi is out of bounds in dimension {i}"
)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result, edges, weights = self._evaluate_linear(
indices, norm_distances, out_of_bounds
)
elif method == "nearest":
result, edges = self._evaluate_nearest(
indices, norm_distances, out_of_bounds
)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
result = result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
out = ()
if self.interpolate_values:
out = out + (result,)
if return_aux:
out = out + (edges,)
if method == "linear":
out = out + (weights,)
return out[0] if len(out) == 1 else out
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,) * (self.values.ndim - len(indices))
# find relevant values
# each i and i+1 represents a edge
edges = list(itertools.product(*[[i, i + 1] for i in indices]))
weights = []
values = 0.0
for edge_indices in edges:
weight = 1.0
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
weights.append(weight[vslice])
return values, edges, weights
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = [
np.where(yi <= 0.5, i, i + 1) for i, yi in zip(indices, norm_distances)
]
edges = tuple(idx_res)
return self.values[edges], edges
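# A small sketch of `Interpolator` on a 2D grid (illustrative only); the grid and
# the function being interpolated are hypothetical.
def _example_interpolator():
    x = np.linspace(0.0, 1.0, 5)
    y = np.linspace(0.0, 2.0, 9)
    xx, yy = np.meshgrid(x, y, indexing="ij")
    values = np.sin(xx) * np.cos(yy)
    interp = Interpolator([x, y], values, method="linear")
    # interpolated value plus the surrounding grid edges and their weights
    result, edges, weights = interp(np.array([[0.3, 1.1]]), return_aux=True)
    # without `values`, only the edges and weights are returned
    grid_only = Interpolator([x, y])
    edges_only, weights_only = grid_only(np.array([[0.3, 1.1]]))
    return result, edges, weights, edges_only, weights_only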
| 10,870 | 40.17803 | 116 | py |
conditional_kde | conditional_kde-main/conditional_kde/__init__.py | """Top-level package for Conditional KDE."""
__author__ = """David Prelogović"""
__email__ = "david.prelogovic@gmail.com"
__version__ = "0.1.0"
from .gaussian import ConditionalGaussian, ConditionalGaussianKernelDensity
from .interpolated import InterpolatedConditionalGaussian, InterpolatedConditionalKernelDensity
__all__ = [
"ConditionalGaussian",
"ConditionalGaussianKernelDensity",
"InterpolatedConditionalGaussian",
"InterpolatedConditionalKernelDensity",
]
| 483 | 29.25 | 95 | py |
conditional_kde | conditional_kde-main/conditional_kde/interpolated.py | """Module containing Interpolated Conditional KDE."""
import numpy as np
from scipy.special import logsumexp
from .gaussian import ConditionalGaussian, ConditionalGaussianKernelDensity
from .util import Interpolator
class InterpolatedConditionalGaussian:
"""Interpolated Conditional Gaussian estimator.
With respect to the `ConditionalGaussian`, which fits full distribution
and slices through it to obtain the conditional distribution, here we allow
for some dimensions of the data to be inherently conditional.
For such dimensions, data should be available for every point on a grid.
To compute the final conditional density, one then interpolates
for the inherently conditional dimensions, and slices through others as before.
Args:
bandwidth (float): allows for the additional smoothing/shrinking of the covariance.
In most cases, it should be left as 1.
"""
def __init__(
self,
bandwidth=1.0,
):
if not isinstance(bandwidth, (int, float)):
raise ValueError("Bandwidth should be a number.")
self.bandwidth = bandwidth
self.inherent_features = None # all inherently conditional features
self.features = None # all other features
self.interpolator = None # interpolator
self.gaussians = None # fitted array of ConditionalGaussian
def fit(
self,
data,
inherent_features=None,
features=None,
interpolation_points=None,
interpolation_method="linear",
):
"""Fitting the Interpolated Conditional Gaussian.
Let's define by Y = (y1, y2, ..., yN) inherently conditional random variables of the dataset,
and by X = (x1, x2, ..., xM) other variables, for which one has a sample of points.
This function then fits P(X | Y) for every point on a gridded Y space.
To make this possible, one needs to pass a set of X samples for every point on a grid.
Later, one can use interpolation in Y and slicing in X
to compute P(x1, x2 | x3, ..., xM, y1, ..., yN), or similar.
Note that all Y values need to be conditioned.
Args:
data (list of arrays, array): data to fit.
Of shape `(n_interp_1, n_interp_2, ..., n_samples, n_features)`.
For every point on a grid `(n_interp_1, n_interp_2, ..., n_interp_N)`
one needs to pass `(n_samples, n_features)` dataset, for which
a separate `n_features`-dim Gaussian KDE is fitted.
All points on a grid have to have the same number of features (`n_features`).
In the case `n_samples` is not the same for every point,
one needs to pass a nested list of arrays.
inherent_features (list): optional, list defining name of every
inherently conditional feature. It is used for referencing conditional dimensions.
Defaults to `[-1, -2, ..., -N]`, where `N` is the number of inherently conditional features.
features (list): optional, list defining name for every other feature.
It's used for referencing conditional dimensions.
Defaults to `[0, 1, ..., n_features - 1]`.
interpolation_points (dict): optional, a dictionary of `feature: list_of_values` pairs.
This defines the grid points for every inherently conditional feature.
                Every list of values should be strictly ascending.
By default it amounts to:
`{-1: np.linspace(0, 1, n_interp_1), ..., -N: np.linspace(0, 1, n_interp_N)}`.
interpolation_method (str): either "linear" or "nearest",
making linear interpolation between distributions or picking the closest one, respectively.
Returns:
An instance of itself.
"""
if isinstance(data, np.ndarray):
if len(data.shape) < 3:
raise ValueError(
"`data` should have at least 3 axes: one for 1D grid, one for samples, "
f"one for the rest of features, but its shape is {data.shape}"
)
N = len(data.shape) - 2
number_of_samples = data.shape[:N]
n_features = data.shape[-1]
data = data.reshape((np.prod(number_of_samples),) + data.shape[-2:])
elif isinstance(data, list):
# only calculating N, leaving main checks for later
def list_depth(l, ns=[]):
ns.append(len(l))
if isinstance(l[0], list):
ns, nf = list_depth(l[0], ns)
elif isinstance(l[0], np.ndarray):
nf = l[0].shape[-1]
else:
raise ValueError(
"`data` of type `list` should not contain anything else "
"besides other sublists or `np.ndarray`."
)
return ns, nf
number_of_samples, n_features = list_depth(data)
N = len(number_of_samples)
data = np.array(data, dtype=object)
if len(data.shape) != N and len(data.shape) != N + 2:
raise ValueError(
f"Total shape of the data should be equal to `N` ({N})"
"if `n_samples` is different for different points on the grid, "
"or `N + 2` if all points on the grid have the same number of samples."
)
if data.shape[:N] != tuple(number_of_samples):
raise ValueError(
f"Something is wrong with the shape of the data ({data.shape}). "
f"Are you sure you defined all points on a grid?"
)
if len(data.shape) == N:
data = data.flatten()
else:
data = data.reshape((np.prod(number_of_samples),) + data.shape[-2:])
else:
raise TypeError(
f"`data` should be `np.ndarray` or `list`, but is {type(data).__name__}"
)
if inherent_features is None:
self.inherent_features = list(range(-1, -N - 1, -1))
else:
if len(inherent_features) != N:
raise ValueError(
"Number of `inherent_features` should be equal to "
f"{N}, but is {len(features)}."
)
if len(inherent_features) != len(set(inherent_features)):
raise ValueError("All `inherent_features` should be unique.")
self.inherent_features = inherent_features
if features is None:
self.features = list(range(n_features))
else:
if len(features) != n_features:
raise ValueError(
"Number of `features` should be equal to "
f"{n_features}, but is {len(features)}"
)
if len(features) != len(set(features)):
raise ValueError("All `features` should be unique.")
self.features = features
if interpolation_points is None:
interpolation_points = {
i: np.linspace(0, 1, n) for i, n in enumerate(number_of_samples)
}
else:
if len(interpolation_points) != N:
raise ValueError(
f"Number of interpolation points ({len(interpolation_points)}) "
f"should be equal to the number of inherently conditional dimensions ({N}), "
f"but is ({len(interpolation_points)})."
)
if not all(
k in self.inherent_features for k in interpolation_points.keys()
):
raise ValueError(
f"All keys of `interpolation_points` ({interpolation_points.keys()}) "
f"should be in `inherent_features` ({self.inherent_features})."
)
points = [v for k, v in interpolation_points.items()]
self.interpolator = Interpolator(points, method=interpolation_method)
self.gaussians = []
for d in data:
gaussian = ConditionalGaussian(
bandwidth=self.bandwidth,
)
gaussian.fit(d.astype(float), features=self.features)
self.gaussians.append(gaussian)
self.gaussians = np.array(self.gaussians, dtype=object)
self.gaussians = self.gaussians.reshape(tuple(number_of_samples))
return self
def score_samples(self, X, inherent_conditionals, conditional_features=None):
"""Compute the conditional log-probability of each sample under the model.
For the simplicity of calculation, here the grid point is fixed by defining
a point in inherently conditional dimensions.
`X` is then an array of shape `(n, n_features)`, including all other dimensions of the data.
Args:
X (array): data of shape `(n, n_features)`.
Last dimension should match dimension of training data `(n_features)`.
inherent_conditionals (dict): values of inherent (grid) features.
                These values are used to interpolate on the grid.
All inherently conditional dimensions must be defined.
conditional_features (list): subset of `self.features`, which dimensions of data
to additionally condition upon. Defaults to `None`, meaning no additionally conditioned dimensions.
Returns:
Conditional log probability for each sample in `X`, conditioned on
inherently conditional dimensions by `inherent_conditionals`
and other dimensions by `conditional_features`.
"""
N = len(self.inherent_features)
if not isinstance(inherent_conditionals, dict):
raise TypeError(
f"`inherent_conditionals` should be dictionary, but is {type(inherent_conditionals).__name__}"
)
if sorted(inherent_conditionals.keys()) != sorted(self.inherent_features):
raise ValueError(
"`inherent_conditionals` keys should be equal to `inherent_features`."
)
inherently_conditional_values = np.array(
[inherent_conditionals[k] for k in self.inherent_features], dtype=np.float32
)
if self.interpolator.method == "linear":
edges, weights = self.interpolator(
inherently_conditional_values, return_aux=True
)
weights = np.array([float(weight.squeeze()) for weight in weights]).reshape(
-1, 1
)
gaussians = [self.gaussians[edge][0] for edge in edges]
log_probs = np.zeros((len(gaussians), len(X)), dtype=np.float128)
for i, gaussian in enumerate(gaussians):
log_probs[i, :] = gaussian.score_samples(X, conditional_features)
return logsumexp(log_probs, axis=0, b=weights)
else:
edge = self.interpolator(inherently_conditional_values, return_aux=True)
            gaussian = self.gaussians[edge][0]
return gaussian.score_samples(X, conditional_features)
def sample(
self,
inherent_conditionals,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model. For `inherent_condtitionals`,
there's only one mode of sampling, where only scalar values are accepted.
For `conditionals` there are two different modes:
(1) specify conditionals as scalar values and sample `n_samples` out of distribution.
(2) specify conditionals as an array, where the number of samples will be the length of an array.
Args:
inherent_conditionals (dict): values of inherent (grid) features.
                These values are used to interpolate on the grid.
All inherently conditional dimensions must be defined.
conditionals (dict): desired variables (features) to condition upon.
Dictionary keys should be only feature names from `features`.
For example, if `self.features == ["a", "b", "c"]` and one would like to condition
on "a" and "c", then `conditionals = {"a": cond_val_a, "c": cond_val_c}`.
Conditioned values can be either `float` or `array`, where in the case of the
latter, all conditioned arrays have to be of the same size.
Defaults to `None`, i.e. normal KDE.
n_samples (int): number of samples to generate. Defaults to 1.
random_state (np.random.RandomState, int): seed or `RandomState` instance, optional.
Determines random number generation used to generate
random samples. See `Glossary <random_state>`.
keep_dims (bool): whether to return non-conditioned dimensions only
or keep given conditional values. Defaults to `False`.
Returns:
            Array of samples of shape `(n_samples, N + n_features)` if `conditionals is None`,
or `(n_samples, n_features - len(conditionals))` otherwise.
"""
N = len(self.inherent_features)
if not isinstance(inherent_conditionals, dict):
raise TypeError(
f"`inherent_conditionals` should be dictionary, but is {type(inherent_conditionals).__name__}"
)
if sorted(inherent_conditionals.keys()) != sorted(self.inherent_features):
raise ValueError(
"`inherent_conditionals` keys should be equal to `inherent_features`."
)
inherently_conditional_values = np.array(
[inherent_conditionals[k] for k in self.inherent_features], dtype=np.float32
)
if isinstance(random_state, np.random.RandomState):
rs = random_state
elif random_state is None or isinstance(random_state, int):
rs = np.random.RandomState(seed=random_state)
else:
raise TypeError("`random_state` should be `int` or `RandomState`.")
if self.interpolator.method == "linear":
edges, weights = self.interpolator(
inherently_conditional_values, return_aux=True
)
weights = [float(weight.squeeze()) for weight in weights]
gaussians = [self.gaussians[edge][0] for edge in edges]
all_samples = [
gaussian.sample(
conditionals=conditionals,
n_samples=n_samples,
random_state=rs,
keep_dims=keep_dims,
)
for gaussian in gaussians
]
# I shouldn't use old n_samples further, as it can differ for the vectorized conditionals
n_samples = len(all_samples[0])
all_samples = np.concatenate(all_samples, axis=0)
all_weights = [np.ones(n_samples) * w for w in weights]
all_weights = np.concatenate(all_weights)
all_weights /= all_weights.sum()
idx = rs.choice(len(all_samples), n_samples, p=all_weights)
samples = all_samples[idx]
if keep_dims:
                # append the inherently conditional values as extra feature columns
                return np.concatenate(
[
np.broadcast_to(inherently_conditional_values, (n_samples, N)),
samples,
],
axis=-1,
)
else:
return samples
else:
edge = self.interpolator(inherently_conditional_values, return_aux=True)
gaussian = self.gaussians[edge][0]
samples = gaussian.sample(
conditionals=conditionals,
n_samples=n_samples,
random_state=rs,
keep_dims=keep_dims,
)
# I shouldn't use old n_samples further, as it can differ for the vectorized conditionals
n_samples = len(samples)
if keep_dims:
                # append the inherently conditional values as extra feature columns
                return np.concatenate(
[
np.broadcast_to(inherently_conditional_values, (n_samples, N)),
samples,
],
axis=-1,
)
else:
return samples
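# A minimal usage sketch of `InterpolatedConditionalGaussian` on a 1D grid of an
# inherently conditional variable "z" (illustrative only); the data, grid and
# conditioned values below are hypothetical.
def _example_interpolated_gaussian():
    rng = np.random.RandomState(0)
    z_grid = np.linspace(0.0, 1.0, 4)
    # one (n_samples, n_features) dataset per grid point in z
    data = np.stack(
        [rng.normal(loc=z, scale=1.0, size=(500, 2)) for z in z_grid], axis=0
    )
    icg = InterpolatedConditionalGaussian().fit(
        data,
        inherent_features=["z"],
        features=["a", "b"],
        interpolation_points={"z": z_grid},
    )
    # log p(a, b | z=0.37), interpolated between the two nearest grid points
    log_p = icg.score_samples(rng.normal(size=(5, 2)), {"z": 0.37})
    # sample b given z = 0.37 and a = 0.1 (conditioned dimensions are dropped by default)
    samples = icg.sample({"z": 0.37}, conditionals={"a": 0.1}, n_samples=10)
    return log_p, samples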
class InterpolatedConditionalKernelDensity:
"""Interpolated Conditional Kernel Density estimator.
With respect to the `ConditionalKernelDensity`, which fits full distribution
and slices through it to obtain the conditional distribution, here we allow
for some dimensions of the data to be inherently conditional.
For such dimensions, data should be available for every point on a grid.
To compute the final conditional density, one then interpolates
for the inherently conditional dimensions, and slices through others as before.
Args:
whitening_algorithm (str): data whitening algorithm, either `None`, "rescale" or "ZCA".
See `util.DataWhitener` for more details. "rescale" by default.
bandwidth (str, float): the width of the Gaussian centered around every point.
It can be either:\n
(1) "scott", using Scott's parameter,\n
(2) "optimized", which minimizes cross entropy to find the optimal bandwidth, or\n
(3) `float`, specifying the actual value.\n
By default, it uses Scott's parameter.
**kwargs: additional kwargs used in the case of "optimized" bandwidth.
steps (int): how many steps to use in optimization, 10 by default.\n
cv_fold (int): cross validation fold, 5 by default.\n
n_jobs (int): number of jobs to run cross validation in parallel,
-1 by default, i.e. using all available processors.\n
verbose (int): verbosity of the cross validation run,
for more details see `sklearn.model_selection.GridSearchCV`.
"""
def __init__(
self,
whitening_algorithm="rescale",
bandwidth="scott",
**kwargs,
):
if whitening_algorithm not in [None, "rescale", "ZCA"]:
raise ValueError(
f"Whitening lgorithm should be None, rescale or ZCA, but is {whitening_algorithm}."
)
self.algorithm = whitening_algorithm
if not isinstance(bandwidth, (int, float)):
if not bandwidth in ["scott", "optimized"]:
raise ValueError(
f"""Bandwidth should be a number, "scott" or "optimized",
but has value {bandwidth} and type {type(bandwidth).__name__}."""
)
self.bandwidth = bandwidth
if self.bandwidth == "optimized":
self.bandwidth_kwargs = {
"steps": kwargs.get("steps", 10),
"cv_fold": kwargs.get("cv_fold", 5),
"n_jobs": kwargs.get("n_jobs", -1),
"verbose": kwargs.get("verbose", 0),
}
else:
self.bandwidth_kwargs = {}
self.inherent_features = None # all inherently conditional features
self.features = None # all other features
self.interpolator = None # interpolator
self.kdes = None # fitted array of Kernel Density Estimators
def fit(
self,
data,
inherent_features=None,
features=None,
interpolation_points=None,
interpolation_method="linear",
):
"""Fitting the Interpolated Conditional Kernel Density.
Let's define by Y = (y1, y2, ..., yN) inherently conditional random variables of the dataset,
and by X = (x1, x2, ..., xM) other variables, for which one has a sample of points.
This function then fits P(X | Y) for every point on a gridded Y space.
To make this possible, one needs to pass a set of X samples for every point on a grid.
Later, one can use interpolation in Y and slicing in X
to compute P(x1, x2 | x3, ..., xM, y1, ..., yN), or similar.
Note that all Y values need to be conditioned.
Args:
data (list of arrays, array): data to fit.
Of shape `(n_interp_1, n_interp_2, ..., n_samples, n_features)`.
For every point on a grid `(n_interp_1, n_interp_2, ..., n_interp_N)`
one needs to pass `(n_samples, n_features)` dataset, for which
a separate `n_features`-dim Gaussian KDE is fitted.
All points on a grid have to have the same number of features (`n_features`).
In the case `n_samples` is not the same for every point,
one needs to pass a nested list of arrays.
inherent_features (list): optional, list defining name of every
inherently conditional feature. It is used for referencing conditional dimensions.
Defaults to `[-1, -2, ..., -N]`, where `N` is the number of inherently conditional features.
features (list): optional, list defining name for every other feature.
It's used for referencing conditional dimensions.
Defaults to `[0, 1, ..., n_features - 1]`.
interpolation_points (dict): optional, a dictionary of `feature: list_of_values` pairs.
This defines the grid points for every inherently conditional feature.
                Every list of values should be strictly ascending.
By default it amounts to:
`{-1: np.linspace(0, 1, n_interp_1), ..., -N: np.linspace(0, 1, n_interp_N)}`.
interpolation_method (str): either "linear" or "nearest",
making linear interpolation between distributions or picking the closest one, respectively.
Returns:
An instance of itself.
"""
if isinstance(data, np.ndarray):
if len(data.shape) < 3:
raise ValueError(
"`data` should have at least 3 axes: one for 1D grid, one for samples, "
f"one for the rest of features, but its shape is {data.shape}"
)
N = len(data.shape) - 2
number_of_samples = data.shape[:N]
n_features = data.shape[-1]
data = data.reshape((np.prod(number_of_samples),) + data.shape[-2:])
elif isinstance(data, list):
# only calculating N, leaving main checks for later
            def list_depth(l, ns=None):
                # use a fresh list per call instead of a mutable default argument
                if ns is None:
                    ns = []
                ns.append(len(l))
if isinstance(l[0], list):
ns, nf = list_depth(l[0], ns)
elif isinstance(l[0], np.ndarray):
nf = l[0].shape[-1]
else:
raise ValueError(
"`data` of type `list` should not contain anything else "
"besides other sublists or `np.ndarray`."
)
return ns, nf
number_of_samples, n_features = list_depth(data)
N = len(number_of_samples)
data = np.array(data, dtype=object)
if len(data.shape) != N and len(data.shape) != N + 2:
raise ValueError(
f"Total shape of the data should be equal to `N` ({N})"
"if `n_samples` is different for different points on the grid, "
"or `N + 2` if all points on the grid have the same number of samples."
)
if data.shape[:N] != tuple(number_of_samples):
raise ValueError(
f"Something is wrong with the shape of the data ({data.shape}). "
f"Are you sure you defined all points on a grid?"
)
if len(data.shape) == N:
data = data.flatten()
else:
data = data.reshape((np.prod(number_of_samples),) + data.shape[-2:])
else:
raise TypeError(
f"`data` should be `np.ndarray` or `list`, but is {type(data).__name__}"
)
if inherent_features is None:
self.inherent_features = list(range(-1, -N - 1, -1))
else:
if len(inherent_features) != N:
raise ValueError(
"Number of `inherent_features` should be equal to "
f"{N}, but is {len(features)}."
)
if len(inherent_features) != len(set(inherent_features)):
raise ValueError("All `inherent_features` should be unique.")
self.inherent_features = inherent_features
if features is None:
self.features = list(range(n_features))
else:
if len(features) != n_features:
raise ValueError(
"Number of `features` should be equal to "
f"{n_features}, but is {len(features)}"
)
if len(features) != len(set(features)):
raise ValueError("All `features` should be unique.")
self.features = features
if interpolation_points is None:
interpolation_points = {
i: np.linspace(0, 1, n) for i, n in enumerate(number_of_samples)
}
else:
if len(interpolation_points) != N:
raise ValueError(
f"Number of interpolation points ({len(interpolation_points)}) "
f"should be equal to the number of inherently conditional dimensions ({N}), "
f"but is ({len(interpolation_points)})."
)
if not all(
k in self.inherent_features for k in interpolation_points.keys()
):
raise ValueError(
f"All keys of `interpolation_points` ({interpolation_points.keys()}) "
f"should be in `inherent_features` ({self.inherent_features})."
)
points = [v for k, v in interpolation_points.items()]
self.interpolator = Interpolator(points, method=interpolation_method)
self.kdes = []
for d in data:
kde = ConditionalGaussianKernelDensity(
whitening_algorithm=self.algorithm,
bandwidth=self.bandwidth,
**self.bandwidth_kwargs,
)
kde.fit(d.astype(float), features=self.features)
self.kdes.append(kde)
self.kdes = np.array(self.kdes, dtype=object)
self.kdes = self.kdes.reshape(tuple(number_of_samples))
return self
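    # Illustrative usage sketch (comment only, not part of the library): fit on a
    # 1D grid of 3 interpolation points, each holding 100 samples of 2 features.
    # All names and numbers below are invented for the example; the constructor
    # call assumes the class name defined above.
    #
    #   data = np.random.rand(3, 100, 2)  # (n_interp_1, n_samples, n_features)
    #   ickde = InterpolatedConditionalKernelDensity(bandwidth="scott")
    #   ickde.fit(
    #       data,
    #       features=["a", "b"],
    #       interpolation_points={-1: np.linspace(0, 1, 3)},
    #   )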
def score_samples(self, X, inherent_conditionals, conditional_features=None):
"""Compute the conditional log-probability of each sample under the model.
        For simplicity of calculation, the grid point is fixed here by specifying
        a point in the inherently conditional dimensions.
`X` is then an array of shape `(n, n_features)`, including all other dimensions of the data.
Args:
X (array): data of shape `(n, n_features)`.
Last dimension should match dimension of training data `(n_features)`.
inherent_conditionals (dict): values of inherent (grid) features.
                These values are used to interpolate on the grid.
All inherently conditional dimensions must be defined.
conditional_features (list): subset of `self.features`, which dimensions of data
to additionally condition upon. Defaults to `None`, meaning no additionally conditioned dimensions.
Returns:
Conditional log probability for each sample in `X`, conditioned on
inherently conditional dimensions by `inherent_conditionals`
and other dimensions by `conditional_features`.
"""
N = len(self.inherent_features)
if not isinstance(inherent_conditionals, dict):
raise TypeError(
f"`inherent_conditionals` should be dictionary, but is {type(inherent_conditionals).__name__}"
)
if sorted(inherent_conditionals.keys()) != sorted(self.inherent_features):
raise ValueError(
"`inherent_conditionals` keys should be equal to `inherent_features`."
)
inherently_conditional_values = np.array(
[inherent_conditionals[k] for k in self.inherent_features], dtype=np.float32
)
if self.interpolator.method == "linear":
edges, weights = self.interpolator(
inherently_conditional_values, return_aux=True
)
weights = np.array([float(weight.squeeze()) for weight in weights]).reshape(
-1, 1
)
kdes = [self.kdes[edge][0] for edge in edges]
log_probs = np.zeros((len(kdes), len(X)), dtype=np.float128)
for i, kde in enumerate(kdes):
log_probs[i, :] = kde.score_samples(X, conditional_features)
return logsumexp(log_probs, axis=0, b=weights)
else:
edge = self.interpolator(inherently_conditional_values, return_aux=True)
kde = self.kdes[edge][0]
return kde.score_samples(X, conditional_features)
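    # Illustrative sketch (continuing the invented example above): evaluate
    # log P(a, b | grid) at an off-grid location. With the "linear" interpolator
    # this is a logsumexp over the neighbouring grid KDEs, weighted by their
    # interpolation weights.
    #
    #   X = np.array([[0.1, 0.2], [0.3, 0.4]])
    #   logp = ickde.score_samples(X, inherent_conditionals={-1: 0.25})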
def sample(
self,
inherent_conditionals,
conditionals=None,
n_samples=1,
random_state=None,
keep_dims=False,
):
"""Generate random samples from the conditional model. For `inherent_condtitionals`,
there's only one mode of sampling, where only scalar values are accepted.
For `conditionals` there are two different modes:
(1) specify conditionals as scalar values and sample `n_samples` out of distribution.
(2) specify conditionals as an array, where the number of samples will be the length of an array.
Args:
inherent_conditionals (dict): values of inherent (grid) features.
                These values are used to interpolate on the grid.
All inherently conditional dimensions must be defined.
conditionals (dict): desired variables (features) to condition upon.
Dictionary keys should be only feature names from `features`.
For example, if `self.features == ["a", "b", "c"]` and one would like to condition
on "a" and "c", then `conditionals = {"a": cond_val_a, "c": cond_val_c}`.
Conditioned values can be either `float` or `array`, where in the case of the
latter, all conditioned arrays have to be of the same size.
Defaults to `None`, i.e. normal KDE.
n_samples (int): number of samples to generate. Defaults to 1.
random_state (np.random.RandomState, int): seed or `RandomState` instance, optional.
Determines random number generation used to generate
random samples. See `Glossary <random_state>`.
keep_dims (bool): whether to return non-conditioned dimensions only
or keep given conditional values. Defaults to `False`.
Returns:
            Array of samples of shape `(n_samples, n_features)` if `conditionals is None`,
            or `(n_samples, n_features - len(conditionals))` otherwise; with `keep_dims=True`
            the `N` inherently conditional grid values are prepended to every sample.
"""
N = len(self.inherent_features)
if not isinstance(inherent_conditionals, dict):
raise TypeError(
f"`inherent_conditionals` should be dictionary, but is {type(inherent_conditionals).__name__}"
)
if sorted(inherent_conditionals.keys()) != sorted(self.inherent_features):
raise ValueError(
"`inherent_conditionals` keys should be equal to `inherent_features`."
)
inherently_conditional_values = np.array(
[inherent_conditionals[k] for k in self.inherent_features], dtype=np.float32
)
if isinstance(random_state, np.random.RandomState):
rs = random_state
elif random_state is None or isinstance(random_state, int):
rs = np.random.RandomState(seed=random_state)
else:
raise TypeError("`random_state` should be `int` or `RandomState`.")
if self.interpolator.method == "linear":
edges, weights = self.interpolator(
inherently_conditional_values, return_aux=True
)
weights = [float(weight.squeeze()) for weight in weights]
kdes = [self.kdes[edge][0] for edge in edges]
all_samples = [
kde.sample(
conditionals=conditionals,
n_samples=n_samples,
random_state=rs,
keep_dims=keep_dims,
)
for kde in kdes
]
# I shouldn't use old n_samples further, as it can differ for the vectorized conditionals
n_samples = len(all_samples[0])
all_samples = np.concatenate(all_samples, axis=0)
all_weights = [np.ones(n_samples) * w for w in weights]
all_weights = np.concatenate(all_weights)
all_weights /= all_weights.sum()
idx = rs.choice(len(all_samples), n_samples, p=all_weights)
samples = all_samples[idx]
            if keep_dims:
                # prepend the grid values so the output has shape (n_samples, N + n_features)
                return np.concatenate(
                    [
                        np.broadcast_to(inherently_conditional_values, (n_samples, N)),
                        samples,
                    ],
                    axis=-1,
                )
            else:
                return samples
else:
edge = self.interpolator(inherently_conditional_values, return_aux=True)
kde = self.kdes[edge][0]
samples = kde.sample(
conditionals=conditionals,
n_samples=n_samples,
random_state=rs,
keep_dims=keep_dims,
)
# I shouldn't use old n_samples further, as it can differ for the vectorized conditionals
n_samples = len(samples)
            if keep_dims:
                # prepend the grid values so the output has shape (n_samples, N + n_features)
                return np.concatenate(
                    [
                        np.broadcast_to(inherently_conditional_values, (n_samples, N)),
                        samples,
                    ],
                    axis=-1,
                )
            else:
                return samples
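    # Illustrative sketch (continuing the invented example above): draw samples at
    # an off-grid point, optionally conditioning on feature "a".
    #
    #   s = ickde.sample(inherent_conditionals={-1: 0.25}, n_samples=10)
    #   s_cond = ickde.sample(
    #       inherent_conditionals={-1: 0.25},
    #       conditionals={"a": 0.1},
    #       n_samples=10,
    #   )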
| 34,620 | 44.916446 | 115 | py |
conditional_kde | conditional_kde-main/tests/conftest.py | import pytest
import numpy as np
@pytest.fixture(scope="session")
def grid():
return {
"x": np.linspace(0, 1, 5),
"y": np.linspace(-1, 1, 3),
"z": np.linspace(-1, 1, 4),
}
| 207 | 15 | 35 | py |
conditional_kde | conditional_kde-main/tests/test_util.py | import pytest
import numpy as np
from conditional_kde.util import DataWhitener, Interpolator
from math import isclose
@pytest.fixture(scope="module")
def random_sample():
mean = np.array([[1, 2, 3]], dtype=np.float32)
std = np.array([[1, 5, 10]], dtype=np.float32)
sample = np.random.normal(size=(100, 3))
return mean + sample * std
@pytest.fixture(scope="module")
def values(grid):
mesh = np.meshgrid(*grid.values(), indexing="ij")
return np.sum(np.stack(mesh, axis=0), axis=0)
def test_whitener(random_sample):
algorithms = [None, "center", "rescale", "PCA", "ZCA"]
dw = {}
for algo in algorithms:
dw[algo] = DataWhitener(algo)
dw[algo].fit(random_sample)
with pytest.raises(ValueError):
DataWhitener("bla").fit(random_sample)
samples = [
np.array([0.0, 0.0, 0.0]),
np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0]]),
np.array([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0]]),
]
for sample in samples:
whiten_unwhiten_res = [
dw[algo].unwhiten(dw[algo].whiten(sample)) for algo in algorithms
]
for res in whiten_unwhiten_res:
assert sample.shape == res.shape
assert np.allclose(whiten_unwhiten_res[0], res)
def test_interpolator(values, grid):
interp = Interpolator(list(grid.values()), values, method="linear")
xi = np.array([0.5, -0.1, 0.7])
assert isclose(sum(xi), interp(xi, return_aux=False))
| 1,483 | 27.538462 | 77 | py |
conditional_kde | conditional_kde-main/tests/__init__.py | """Unit test package for conditional_kde."""
| 45 | 22 | 44 | py |
conditional_kde | conditional_kde-main/docs/conf.py | #!/usr/bin/env python
#
# conditional_kde documentation build configuration file, created by
# sphinx-quickstart on Fri Jun 9 13:47:02 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(".."))
import conditional_kde
# -- General configuration ---------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.doctest",
"sphinx.ext.extlinks",
"sphinx.ext.ifconfig",
"sphinx.ext.napoleon",
"sphinx.ext.todo",
"sphinx.ext.viewcode",
"sphinx.ext.mathjax",
"sphinx.ext.autosectionlabel",
"autoclasstoc",
"nbsphinx",
]
autosummary_generate = True
napoleon_include_private_with_doc = True
autodoc_default_options = {
"special-members": "__call__",
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "Conditional KDE"
copyright = "2022, David Prelogović"
author = "David Prelogović"
# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = conditional_kde.__version__
# The full version, including alpha/beta/rc tags.
release = conditional_kde.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "default"
pygments_dark_style = "monokai"
highlight_language = "python3"
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output -------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "furo"
# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# -- Options for HTMLHelp output ---------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = "conditional_kdedoc"
# -- Options for LaTeX output ------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"conditional_kde.tex",
"Conditional KDE Documentation",
"David Prelogović",
"manual",
),
]
# -- Options for manual page output ------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, "conditional_kde", "Conditional KDE Documentation", [author], 1)
]
# -- Options for Texinfo output ----------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"conditional_kde",
"Conditional KDE Documentation",
author,
"conditional_kde",
"One line description of project.",
"Miscellaneous",
),
]
| 5,443 | 28.427027 | 81 | py |
avatarify-python | avatarify-python-master/afy/camera_selector.py | import cv2
import numpy as np
import yaml
from afy.utils import log
g_selected_cam = None
def query_cameras(n_cams):
cam_frames = {}
cap = None
for camid in range(n_cams):
log(f"Trying camera with id {camid}")
cap = cv2.VideoCapture(camid)
if not cap.isOpened():
log(f"Camera with id {camid} is not available")
continue
ret, frame = cap.read()
if not ret or frame is None:
log(f"Could not read from camera with id {camid}")
cap.release()
continue
for i in range(10):
ret, frame = cap.read()
cam_frames[camid] = frame.copy()
cap.release()
return cam_frames
def make_grid(images, cell_size=(320, 240), cols=2):
w0, h0 = cell_size
_rows = len(images) // cols + int(len(images) % cols)
_cols = min(len(images), cols)
grid = np.zeros((h0 * _rows, w0 * _cols, 3), dtype=np.uint8)
for i, (camid, img) in enumerate(images.items()):
img = cv2.resize(img, (w0, h0))
# add rect
img = cv2.rectangle(img, (1, 1), (w0 - 1, h0 - 1), (0, 0, 255), 2)
# add id
img = cv2.putText(img, f'Camera {camid}', (10, 30), 0, 1, (0, 255, 0), 2)
c = i % cols
r = i // cols
grid[r * h0:(r + 1) * h0, c * w0:(c + 1) * w0] = img[..., :3]
return grid
def mouse_callback(event, x, y, flags, userdata):
global g_selected_cam
    if event == cv2.EVENT_LBUTTONDOWN:  # left mouse button press
cell_size, grid_cols, cam_frames = userdata
c = x // cell_size[0]
r = y // cell_size[1]
camid = r * grid_cols + c
if camid < len(cam_frames):
g_selected_cam = camid
def select_camera(cam_frames, window="Camera selector"):
cell_size = 320, 240
grid_cols = 2
grid = make_grid(cam_frames, cols=grid_cols)
# to fit the text if only one cam available
if grid.shape[1] == 320:
cell_size = 640, 480
grid = cv2.resize(grid, cell_size)
cv2.putText(grid, f'Click on the web camera to use', (10, grid.shape[0] - 30), 0, 0.7, (200, 200, 200), 2)
cv2.namedWindow(window)
cv2.setMouseCallback(window, mouse_callback, (cell_size, grid_cols, cam_frames))
cv2.imshow(window, grid)
while True:
key = cv2.waitKey(10)
if g_selected_cam is not None:
break
if key == 27:
break
cv2.destroyAllWindows()
if g_selected_cam is not None:
return list(cam_frames)[g_selected_cam]
else:
return list(cam_frames)[0]
if __name__ == '__main__':
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
cam_frames = query_cameras(config['query_n_cams'])
if cam_frames:
selected_cam = select_camera(cam_frames)
print(f"Selected camera {selected_cam}")
else:
log("No cameras are available")
| 2,902 | 24.919643 | 110 | py |
avatarify-python | avatarify-python-master/afy/arguments.py | from argparse import ArgumentParser
parser = ArgumentParser()
parser.add_argument("--config", help="path to config")
parser.add_argument("--checkpoint", default='vox-cpk.pth.tar', help="path to checkpoint to restore")
parser.add_argument("--relative", dest="relative", action="store_true", help="use relative or absolute keypoint coordinates")
parser.add_argument("--adapt_scale", dest="adapt_scale", action="store_true", help="adapt movement scale based on convex hull of keypoints")
parser.add_argument("--no-pad", dest="no_pad", action="store_true", help="don't pad output image")
parser.add_argument("--enc_downscale", default=1, type=float, help="Downscale factor for encoder input. Improves performance with cost of quality.")
parser.add_argument("--virt-cam", type=int, default=0, help="Virtualcam device ID")
parser.add_argument("--no-stream", action="store_true", help="On Linux, force no streaming")
parser.add_argument("--verbose", action="store_true", help="Print additional information")
parser.add_argument("--hide-rect", action="store_true", default=False, help="Hide the helper rectangle in preview window")
parser.add_argument("--avatars", default="./avatars", help="path to avatars directory")
parser.add_argument("--is-worker", action="store_true", help="Whether to run this process as a remote GPU worker")
parser.add_argument("--is-client", action="store_true", help="Whether to run this process as a client")
parser.add_argument("--in-port", type=int, default=5557, help="Remote worker input port")
parser.add_argument("--out-port", type=int, default=5558, help="Remote worker output port")
parser.add_argument("--in-addr", type=str, default=None, help="Socket address for incoming messages, like example.com:5557")
parser.add_argument("--out-addr", type=str, default=None, help="Socker address for outcoming messages, like example.com:5558")
parser.add_argument("--jpg_quality", type=int, default=95, help="Jpeg copression quality for image transmission")
parser.set_defaults(relative=False)
parser.set_defaults(adapt_scale=False)
parser.set_defaults(no_pad=False)
opt = parser.parse_args()
if opt.is_client and (opt.in_addr is None or opt.out_addr is None):
raise ValueError("You have to set --in-addr and --out-addr")
| 2,256 | 61.694444 | 148 | py |
avatarify-python | avatarify-python-master/afy/networking.py | import zmq
import numpy as np
import msgpack
import msgpack_numpy as m
m.patch()
from afy.utils import log
def check_connection(socket, timeout=1000):
old_rcvtimeo = socket.RCVTIMEO
socket.RCVTIMEO = timeout
try:
data = msgpack.packb(([], {}))
socket.send_data('hello', data)
attr_recv, data_recv = socket.recv_data()
response = msgpack.unpackb(data_recv)
except zmq.error.Again:
return False
finally:
socket.RCVTIMEO = old_rcvtimeo
log(f"Response to hello is {response}")
return response == 'OK'
class SerializingSocket(zmq.Socket):
"""Numpy array serialization methods.
Based on https://github.com/jeffbass/imagezmq/blob/master/imagezmq/imagezmq.py#L291
Used for sending / receiving OpenCV images, which are Numpy arrays.
Also used for sending / receiving jpg compressed OpenCV images.
"""
def send_array(self, A, msg='NoName', flags=0, copy=True, track=False):
"""Sends a numpy array with metadata and text message.
Sends a numpy array with the metadata necessary for reconstructing
the array (dtype,shape). Also sends a text msg, often the array or
image name.
Arguments:
A: numpy array or OpenCV image.
msg: (optional) array name, image name or text message.
flags: (optional) zmq flags.
copy: (optional) zmq copy flag.
track: (optional) zmq track flag.
"""
md = dict(
msg=msg,
dtype=str(A.dtype),
shape=A.shape,
)
self.send_json(md, flags | zmq.SNDMORE)
return self.send(A, flags, copy=copy, track=track)
def send_data(self,
msg='NoName',
data=b'00',
flags=0,
copy=True,
track=False):
"""Send a jpg buffer with a text message.
Sends a jpg bytestring of an OpenCV image.
Also sends text msg, often the image name.
Arguments:
msg: image name or text message.
data: binary data to be sent.
flags: (optional) zmq flags.
copy: (optional) zmq copy flag.
track: (optional) zmq track flag.
"""
md = dict(msg=msg, )
self.send_json(md, flags | zmq.SNDMORE)
return self.send(data, flags, copy=copy, track=track)
def recv_array(self, flags=0, copy=True, track=False):
"""Receives a numpy array with metadata and text message.
Receives a numpy array with the metadata necessary
for reconstructing the array (dtype,shape).
Returns the array and a text msg, often the array or image name.
Arguments:
flags: (optional) zmq flags.
copy: (optional) zmq copy flag.
track: (optional) zmq track flag.
Returns:
msg: image name or text message.
A: numpy array or OpenCV image reconstructed with dtype and shape.
"""
md = self.recv_json(flags=flags)
msg = self.recv(flags=flags, copy=copy, track=track)
A = np.frombuffer(msg, dtype=md['dtype'])
return (md['msg'], A.reshape(md['shape']))
def recv_data(self, flags=0, copy=True, track=False):
"""Receives a jpg buffer and a text msg.
Receives a jpg bytestring of an OpenCV image.
Also receives a text msg, often the image name.
Arguments:
flags: (optional) zmq flags.
copy: (optional) zmq copy flag.
track: (optional) zmq track flag.
Returns:
msg: image name or text message.
data: bytestring, containing data.
"""
md = self.recv_json(flags=flags) # metadata text
data = self.recv(flags=flags, copy=copy, track=track)
return (md['msg'], data)
class SerializingContext(zmq.Context):
_socket_class = SerializingSocket
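# Illustrative sketch (comment only, not part of the module): a PUSH/PULL pair
# exchanging a text message plus a binary payload via send_data()/recv_data().
# The port number is an arbitrary choice for the example.
#
#   ctx = SerializingContext()
#   pull = ctx.socket(zmq.PULL)
#   pull.bind("tcp://*:5557")
#   push = ctx.socket(zmq.PUSH)
#   push.connect("tcp://localhost:5557")
#   push.send_data(msg="hello", data=b"payload")
#   msg, data = pull.recv_data()  # -> ("hello", b"payload")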
| 3,921 | 29.403101 | 87 | py |
avatarify-python | avatarify-python-master/afy/predictor_local.py | from scipy.spatial import ConvexHull
import torch
import yaml
from modules.keypoint_detector import KPDetector
from modules.generator_optim import OcclusionAwareGenerator
from sync_batchnorm import DataParallelWithCallback
import numpy as np
import face_alignment
def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False,
use_relative_movement=False, use_relative_jacobian=False):
if adapt_movement_scale:
source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume
driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume
adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area)
else:
adapt_movement_scale = 1
kp_new = {k: v for k, v in kp_driving.items()}
if use_relative_movement:
kp_value_diff = (kp_driving['value'] - kp_driving_initial['value'])
kp_value_diff *= adapt_movement_scale
kp_new['value'] = kp_value_diff + kp_source['value']
if use_relative_jacobian:
jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian']))
kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian'])
return kp_new
def to_tensor(a):
return torch.tensor(a[np.newaxis].astype(np.float32)).permute(0, 3, 1, 2) / 255
class PredictorLocal:
def __init__(self, config_path, checkpoint_path, relative=False, adapt_movement_scale=False, device=None, enc_downscale=1):
self.device = device or ('cuda' if torch.cuda.is_available() else 'cpu')
self.relative = relative
self.adapt_movement_scale = adapt_movement_scale
self.start_frame = None
self.start_frame_kp = None
self.kp_driving_initial = None
self.config_path = config_path
self.checkpoint_path = checkpoint_path
self.generator, self.kp_detector = self.load_checkpoints()
self.fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D, flip_input=True, device=self.device)
self.source = None
self.kp_source = None
self.enc_downscale = enc_downscale
def load_checkpoints(self):
with open(self.config_path) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
generator = OcclusionAwareGenerator(**config['model_params']['generator_params'],
**config['model_params']['common_params'])
generator.to(self.device)
kp_detector = KPDetector(**config['model_params']['kp_detector_params'],
**config['model_params']['common_params'])
kp_detector.to(self.device)
checkpoint = torch.load(self.checkpoint_path, map_location=self.device)
generator.load_state_dict(checkpoint['generator'])
kp_detector.load_state_dict(checkpoint['kp_detector'])
generator.eval()
kp_detector.eval()
return generator, kp_detector
def reset_frames(self):
self.kp_driving_initial = None
def set_source_image(self, source_image):
self.source = to_tensor(source_image).to(self.device)
self.kp_source = self.kp_detector(self.source)
if self.enc_downscale > 1:
h, w = int(self.source.shape[2] / self.enc_downscale), int(self.source.shape[3] / self.enc_downscale)
source_enc = torch.nn.functional.interpolate(self.source, size=(h, w), mode='bilinear')
else:
source_enc = self.source
self.generator.encode_source(source_enc)
def predict(self, driving_frame):
assert self.kp_source is not None, "call set_source_image()"
with torch.no_grad():
driving = to_tensor(driving_frame).to(self.device)
if self.kp_driving_initial is None:
self.kp_driving_initial = self.kp_detector(driving)
self.start_frame = driving_frame.copy()
self.start_frame_kp = self.get_frame_kp(driving_frame)
kp_driving = self.kp_detector(driving)
kp_norm = normalize_kp(kp_source=self.kp_source, kp_driving=kp_driving,
kp_driving_initial=self.kp_driving_initial, use_relative_movement=self.relative,
use_relative_jacobian=self.relative, adapt_movement_scale=self.adapt_movement_scale)
out = self.generator(self.source, kp_source=self.kp_source, kp_driving=kp_norm)
out = np.transpose(out['prediction'].data.cpu().numpy(), [0, 2, 3, 1])[0]
out = (np.clip(out, 0, 1) * 255).astype(np.uint8)
return out
def get_frame_kp(self, image):
kp_landmarks = self.fa.get_landmarks(image)
if kp_landmarks:
kp_image = kp_landmarks[0]
kp_image = self.normalize_alignment_kp(kp_image)
return kp_image
else:
return None
@staticmethod
def normalize_alignment_kp(kp):
kp = kp - kp.mean(axis=0, keepdims=True)
area = ConvexHull(kp[:, :2]).volume
area = np.sqrt(area)
kp[:, :2] = kp[:, :2] / area
return kp
def get_start_frame(self):
return self.start_frame
def get_start_frame_kp(self):
return self.start_frame_kp
| 5,348 | 38.622222 | 127 | py |
avatarify-python | avatarify-python-master/afy/utils.py | import sys
import time
from collections import defaultdict
import numpy as np
import cv2
def log(*args, file=sys.stderr, **kwargs):
time_str = f'{time.time():.6f}'
print(f'[{time_str}]', *args, file=file, **kwargs)
def info(*args, file=sys.stdout, **kwargs):
print(*args, file=file, **kwargs)
class Tee(object):
def __init__(self, filename, mode='w', terminal=sys.stderr):
self.file = open(filename, mode, buffering=1)
self.terminal = terminal
def __del__(self):
self.file.close()
def write(self, *args, **kwargs):
log(*args, file=self.file, **kwargs)
log(*args, file=self.terminal, **kwargs)
def __call__(self, *args, **kwargs):
return self.write(*args, **kwargs)
def flush(self):
self.file.flush()
class Logger():
def __init__(self, filename, verbose=True):
self.tee = Tee(filename)
self.verbose = verbose
def __call__(self, *args, important=False, **kwargs):
if not self.verbose and not important:
return
self.tee(*args, **kwargs)
class Once():
_id = {}
def __init__(self, what, who=log, per=1e12):
""" Do who(what) once per seconds.
what: args for who
who: callable
per: frequency in seconds.
"""
assert callable(who)
now = time.time()
if what not in Once._id or now - Once._id[what] > per:
who(what)
Once._id[what] = now
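# Illustrative use of Once (mirrors how predictor_worker.py uses it): throttle a
# noisy log line to at most once per second inside a tight loop. The loop
# condition name is invented for the example.
#
#   while running:
#       Once("still waiting for frames...", log, per=1)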
class TicToc:
def __init__(self):
self.t = None
self.t_init = time.time()
def tic(self):
self.t = time.time()
def toc(self, total=False):
if total:
return (time.time() - self.t_init) * 1000
assert self.t, 'You forgot to call tic()'
return (time.time() - self.t) * 1000
def tocp(self, str):
t = self.toc()
log(f"{str} took {t:.4f}ms")
return t
class AccumDict:
def __init__(self, num_f=3):
self.d = defaultdict(list)
self.num_f = num_f
def add(self, k, v):
self.d[k] += [v]
def __dict__(self):
return self.d
def __getitem__(self, key):
return self.d[key]
def __str__(self):
s = ''
for k in self.d:
if not self.d[k]:
continue
cur = self.d[k][-1]
avg = np.mean(self.d[k])
format_str = '{:.%df}' % self.num_f
cur_str = format_str.format(cur)
avg_str = format_str.format(avg)
s += f'{k} {cur_str} ({avg_str})\t\t'
return s
def __repr__(self):
return self.__str__()
def clamp(value, min_value, max_value):
return max(min(value, max_value), min_value)
def crop(img, p=0.7, offset_x=0, offset_y=0):
h, w = img.shape[:2]
x = int(min(w, h) * p)
l = (w - x) // 2
r = w - l
u = (h - x) // 2
d = h - u
offset_x = clamp(offset_x, -l, w - r)
offset_y = clamp(offset_y, -u, h - d)
l += offset_x
r += offset_x
u += offset_y
d += offset_y
return img[u:d, l:r], (offset_x, offset_y)
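# Worked example with assumed numbers (not from the original code): for a 640x480
# frame and p=0.7 the window is int(480 * 0.7) = 336 pixels square, centered at
# l=152, u=72; offsets are clamped to [-152, 152] horizontally so the crop never
# leaves the frame.
#
#   face, (ox, oy) = crop(frame, p=0.7, offset_x=1000)  # ox comes back clamped to 152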
def pad_img(img, target_size, default_pad=0):
sh, sw = img.shape[:2]
w, h = target_size
pad_w, pad_h = default_pad, default_pad
if w / h > 1:
pad_w += int(sw * (w / h) - sw) // 2
else:
pad_h += int(sh * (h / w) - sh) // 2
out = np.pad(img, [[pad_h, pad_h], [pad_w, pad_w], [0,0]], 'constant')
return out
def resize(img, size, version='cv'):
return cv2.resize(img, size)
| 3,562 | 22.136364 | 74 | py |
avatarify-python | avatarify-python-master/afy/cam_fomm.py | import os, sys
from sys import platform as _platform
import glob
import yaml
import time
import requests
import numpy as np
import cv2
from afy.videocaptureasync import VideoCaptureAsync
from afy.arguments import opt
from afy.utils import info, Once, Tee, crop, pad_img, resize, TicToc
import afy.camera_selector as cam_selector
log = Tee('./var/log/cam_fomm.log')
# Where to split an array from face_alignment to separate each landmark
LANDMARK_SLICE_ARRAY = np.array([17, 22, 27, 31, 36, 42, 48, 60])
if _platform == 'darwin':
if not opt.is_client:
        info('\nOnly remote GPU mode is supported for Mac (use --is-client with --in-addr/--out-addr to connect to the server)')
        info('Standalone version will be available later!\n')
exit()
def is_new_frame_better(source, driving, predictor):
global avatar_kp
global display_string
if avatar_kp is None:
display_string = "No face detected in avatar."
return False
if predictor.get_start_frame() is None:
display_string = "No frame to compare to."
return True
driving_smaller = resize(driving, (128, 128))[..., :3]
new_kp = predictor.get_frame_kp(driving)
if new_kp is not None:
new_norm = (np.abs(avatar_kp - new_kp) ** 2).sum()
old_norm = (np.abs(avatar_kp - predictor.get_start_frame_kp()) ** 2).sum()
out_string = "{0} : {1}".format(int(new_norm * 100), int(old_norm * 100))
display_string = out_string
log(out_string)
return new_norm < old_norm
else:
display_string = "No face found!"
return False
def load_stylegan_avatar():
url = "https://thispersondoesnotexist.com/image"
r = requests.get(url, headers={'User-Agent': "My User Agent 1.0"}).content
image = np.frombuffer(r, np.uint8)
image = cv2.imdecode(image, cv2.IMREAD_COLOR)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image = resize(image, (IMG_SIZE, IMG_SIZE))
return image
def load_images(IMG_SIZE = 256):
avatars = []
filenames = []
images_list = sorted(glob.glob(f'{opt.avatars}/*'))
for i, f in enumerate(images_list):
if f.endswith('.jpg') or f.endswith('.jpeg') or f.endswith('.png'):
img = cv2.imread(f)
if img is None:
log("Failed to open image: {}".format(f))
continue
if img.ndim == 2:
img = np.tile(img[..., None], [1, 1, 3])
img = img[..., :3][..., ::-1]
img = resize(img, (IMG_SIZE, IMG_SIZE))
avatars.append(img)
filenames.append(f)
return avatars, filenames
def change_avatar(predictor, new_avatar):
global avatar, avatar_kp, kp_source
avatar_kp = predictor.get_frame_kp(new_avatar)
kp_source = None
avatar = new_avatar
predictor.set_source_image(avatar)
def draw_rect(img, rw=0.6, rh=0.8, color=(255, 0, 0), thickness=2):
h, w = img.shape[:2]
l = w * (1 - rw) // 2
r = w - l
u = h * (1 - rh) // 2
d = h - u
img = cv2.rectangle(img, (int(l), int(u)), (int(r), int(d)), color, thickness)
def kp_to_pixels(arr):
'''Convert normalized landmark locations to screen pixels'''
return ((arr + 1) * 127).astype(np.int32)
def draw_face_landmarks(img, face_kp, color=(20, 80, 255)):
if face_kp is not None:
img = cv2.polylines(img, np.split(kp_to_pixels(face_kp), LANDMARK_SLICE_ARRAY), False, color)
def print_help():
info('\n\n=== Control keys ===')
info('1-9: Change avatar')
for i, fname in enumerate(avatar_names):
key = i + 1
name = fname.split('/')[-1]
info(f'{key}: {name}')
info('W: Zoom camera in')
info('S: Zoom camera out')
info('A: Previous avatar in folder')
info('D: Next avatar in folder')
info('Q: Get random avatar')
info('X: Calibrate face pose')
info('I: Show FPS')
info('ESC: Quit')
info('\nFull key list: https://github.com/alievk/avatarify#controls')
info('\n\n')
def draw_fps(frame, fps, timing, x0=10, y0=20, ystep=30, fontsz=0.5, color=(255, 255, 255)):
frame = frame.copy()
cv2.putText(frame, f"FPS: {fps:.1f}", (x0, y0 + ystep * 0), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Model time (ms): {timing['predict']:.1f}", (x0, y0 + ystep * 1), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Preproc time (ms): {timing['preproc']:.1f}", (x0, y0 + ystep * 2), 0, fontsz * IMG_SIZE / 256, color, 1)
cv2.putText(frame, f"Postproc time (ms): {timing['postproc']:.1f}", (x0, y0 + ystep * 3), 0, fontsz * IMG_SIZE / 256, color, 1)
return frame
def draw_landmark_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255)):
frame = frame.copy()
cv2.putText(frame, "ALIGN FACES", (60, 20), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "THEN PRESS X", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk)
return frame
def draw_calib_text(frame, thk=2, fontsz=0.5, color=(0, 0, 255)):
frame = frame.copy()
cv2.putText(frame, "FIT FACE IN RECTANGLE", (40, 20), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "W - ZOOM IN", (60, 40), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "S - ZOOM OUT", (60, 60), 0, fontsz * IMG_SIZE / 255, color, thk)
cv2.putText(frame, "THEN PRESS X", (60, 245), 0, fontsz * IMG_SIZE / 255, color, thk)
return frame
def select_camera(config):
cam_config = config['cam_config']
cam_id = None
if os.path.isfile(cam_config):
with open(cam_config, 'r') as f:
cam_config = yaml.load(f, Loader=yaml.FullLoader)
cam_id = cam_config['cam_id']
else:
cam_frames = cam_selector.query_cameras(config['query_n_cams'])
if cam_frames:
if len(cam_frames) == 1:
cam_id = list(cam_frames)[0]
else:
cam_id = cam_selector.select_camera(cam_frames, window="CLICK ON YOUR CAMERA")
log(f"Selected camera {cam_id}")
with open(cam_config, 'w') as f:
yaml.dump({'cam_id': cam_id}, f)
else:
log("No cameras are available")
return cam_id
if __name__ == "__main__":
with open('config.yaml', 'r') as f:
config = yaml.load(f, Loader=yaml.FullLoader)
global display_string
display_string = ""
IMG_SIZE = 256
log('Loading Predictor')
predictor_args = {
'config_path': opt.config,
'checkpoint_path': opt.checkpoint,
'relative': opt.relative,
'adapt_movement_scale': opt.adapt_scale,
'enc_downscale': opt.enc_downscale
}
if opt.is_worker:
from afy import predictor_worker
predictor_worker.run_worker(opt.in_port, opt.out_port)
sys.exit(0)
elif opt.is_client:
from afy import predictor_remote
try:
predictor = predictor_remote.PredictorRemote(
in_addr=opt.in_addr, out_addr=opt.out_addr,
**predictor_args
)
except ConnectionError as err:
log(err)
sys.exit(1)
predictor.start()
else:
from afy import predictor_local
predictor = predictor_local.PredictorLocal(
**predictor_args
)
cam_id = select_camera(config)
if cam_id is None:
exit(1)
cap = VideoCaptureAsync(cam_id)
cap.start()
avatars, avatar_names = load_images()
enable_vcam = not opt.no_stream
ret, frame = cap.read()
stream_img_size = frame.shape[1], frame.shape[0]
if enable_vcam:
if _platform in ['linux', 'linux2']:
try:
import pyfakewebcam
except ImportError:
log("pyfakewebcam is not installed.")
exit(1)
stream = pyfakewebcam.FakeWebcam(f'/dev/video{opt.virt_cam}', *stream_img_size)
else:
enable_vcam = False
# log("Virtual camera is supported only on Linux.")
# if not enable_vcam:
# log("Virtual camera streaming will be disabled.")
cur_ava = 0
avatar = None
change_avatar(predictor, avatars[cur_ava])
passthrough = False
cv2.namedWindow('cam', cv2.WINDOW_GUI_NORMAL)
cv2.moveWindow('cam', 500, 250)
frame_proportion = 0.9
frame_offset_x = 0
frame_offset_y = 0
overlay_alpha = 0.0
preview_flip = False
output_flip = False
find_keyframe = False
is_calibrated = False
show_landmarks = False
fps_hist = []
fps = 0
show_fps = False
print_help()
try:
while True:
tt = TicToc()
timing = {
'preproc': 0,
'predict': 0,
'postproc': 0
}
green_overlay = False
tt.tic()
ret, frame = cap.read()
if not ret:
log("Can't receive frame (stream end?). Exiting ...")
break
frame = frame[..., ::-1]
frame_orig = frame.copy()
frame, (frame_offset_x, frame_offset_y) = crop(frame, p=frame_proportion, offset_x=frame_offset_x, offset_y=frame_offset_y)
frame = resize(frame, (IMG_SIZE, IMG_SIZE))[..., :3]
if find_keyframe:
if is_new_frame_better(avatar, frame, predictor):
log("Taking new frame!")
green_overlay = True
predictor.reset_frames()
timing['preproc'] = tt.toc()
if passthrough:
out = frame
elif is_calibrated:
tt.tic()
out = predictor.predict(frame)
if out is None:
log('predict returned None')
timing['predict'] = tt.toc()
else:
out = None
tt.tic()
key = cv2.waitKey(1)
if cv2.getWindowProperty('cam', cv2.WND_PROP_VISIBLE) < 1.0:
break
elif is_calibrated and cv2.getWindowProperty('avatarify', cv2.WND_PROP_VISIBLE) < 1.0:
break
if key == 27: # ESC
break
elif key == ord('d'):
cur_ava += 1
if cur_ava >= len(avatars):
cur_ava = 0
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == ord('a'):
cur_ava -= 1
if cur_ava < 0:
cur_ava = len(avatars) - 1
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == ord('w'):
frame_proportion -= 0.05
frame_proportion = max(frame_proportion, 0.1)
elif key == ord('s'):
frame_proportion += 0.05
frame_proportion = min(frame_proportion, 1.0)
elif key == ord('H'):
frame_offset_x -= 1
elif key == ord('h'):
frame_offset_x -= 5
elif key == ord('K'):
frame_offset_x += 1
elif key == ord('k'):
frame_offset_x += 5
elif key == ord('J'):
frame_offset_y -= 1
elif key == ord('j'):
frame_offset_y -= 5
elif key == ord('U'):
frame_offset_y += 1
elif key == ord('u'):
frame_offset_y += 5
elif key == ord('Z'):
frame_offset_x = 0
frame_offset_y = 0
frame_proportion = 0.9
elif key == ord('x'):
predictor.reset_frames()
if not is_calibrated:
cv2.namedWindow('avatarify', cv2.WINDOW_GUI_NORMAL)
cv2.moveWindow('avatarify', 600, 250)
is_calibrated = True
show_landmarks = False
elif key == ord('z'):
overlay_alpha = max(overlay_alpha - 0.1, 0.0)
elif key == ord('c'):
overlay_alpha = min(overlay_alpha + 0.1, 1.0)
elif key == ord('r'):
preview_flip = not preview_flip
elif key == ord('t'):
output_flip = not output_flip
elif key == ord('f'):
find_keyframe = not find_keyframe
elif key == ord('o'):
show_landmarks = not show_landmarks
elif key == ord('q'):
try:
log('Loading StyleGAN avatar...')
avatar = load_stylegan_avatar()
passthrough = False
change_avatar(predictor, avatar)
except:
log('Failed to load StyleGAN avatar')
elif key == ord('l'):
try:
log('Reloading avatars...')
avatars, avatar_names = load_images()
passthrough = False
log("Images reloaded")
except:
log('Image reload failed')
elif key == ord('i'):
show_fps = not show_fps
elif 48 < key < 58:
cur_ava = min(key - 49, len(avatars) - 1)
passthrough = False
change_avatar(predictor, avatars[cur_ava])
elif key == 48:
passthrough = not passthrough
elif key != -1:
log(key)
if overlay_alpha > 0:
preview_frame = cv2.addWeighted( avatar, overlay_alpha, frame, 1.0 - overlay_alpha, 0.0)
else:
preview_frame = frame.copy()
if show_landmarks:
# Dim the background to make it easier to see the landmarks
preview_frame = cv2.convertScaleAbs(preview_frame, alpha=0.5, beta=0.0)
draw_face_landmarks(preview_frame, avatar_kp, (200, 20, 10))
frame_kp = predictor.get_frame_kp(frame)
draw_face_landmarks(preview_frame, frame_kp)
if preview_flip:
preview_frame = cv2.flip(preview_frame, 1)
if green_overlay:
green_alpha = 0.8
overlay = preview_frame.copy()
overlay[:] = (0, 255, 0)
preview_frame = cv2.addWeighted( preview_frame, green_alpha, overlay, 1.0 - green_alpha, 0.0)
timing['postproc'] = tt.toc()
if find_keyframe:
preview_frame = cv2.putText(preview_frame, display_string, (10, 220), 0, 0.5 * IMG_SIZE / 256, (255, 255, 255), 1)
if show_fps:
preview_frame = draw_fps(preview_frame, fps, timing)
if not is_calibrated:
preview_frame = draw_calib_text(preview_frame)
elif show_landmarks:
preview_frame = draw_landmark_text(preview_frame)
if not opt.hide_rect:
draw_rect(preview_frame)
cv2.imshow('cam', preview_frame[..., ::-1])
if out is not None:
if not opt.no_pad:
out = pad_img(out, stream_img_size)
if output_flip:
out = cv2.flip(out, 1)
if enable_vcam:
out = resize(out, stream_img_size)
stream.schedule_frame(out)
cv2.imshow('avatarify', out[..., ::-1])
fps_hist.append(tt.toc(total=True))
if len(fps_hist) == 10:
fps = 10 / (sum(fps_hist) / 1000)
fps_hist = []
except KeyboardInterrupt:
log("main: user interrupt")
log("stopping camera")
cap.stop()
cv2.destroyAllWindows()
if opt.is_client:
log("stopping remote predictor")
predictor.stop()
log("main: exit")
| 16,026 | 31.708163 | 135 | py |
avatarify-python | avatarify-python-master/afy/videocaptureasync.py | # https://github.com/gilbertfrancois/video-capture-async
import threading
import cv2
import time
WARMUP_TIMEOUT = 10.0
class VideoCaptureAsync:
def __init__(self, src=0, width=640, height=480):
self.src = src
self.cap = cv2.VideoCapture(self.src)
if not self.cap.isOpened():
raise RuntimeError("Cannot open camera")
self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
self.grabbed, self.frame = self.cap.read()
self.started = False
self.read_lock = threading.Lock()
def set(self, var1, var2):
self.cap.set(var1, var2)
def isOpened(self):
return self.cap.isOpened()
def start(self):
if self.started:
print('[!] Asynchronous video capturing has already been started.')
return None
self.started = True
self.thread = threading.Thread(target=self.update, args=(), daemon=True)
self.thread.start()
# (warmup) wait for the first successfully grabbed frame
warmup_start_time = time.time()
while not self.grabbed:
warmup_elapsed_time = (time.time() - warmup_start_time)
if warmup_elapsed_time > WARMUP_TIMEOUT:
raise RuntimeError(f"Failed to succesfully grab frame from the camera (timeout={WARMUP_TIMEOUT}s). Try to restart.")
time.sleep(0.5)
return self
def update(self):
while self.started:
grabbed, frame = self.cap.read()
if not grabbed or frame is None or frame.size == 0:
continue
with self.read_lock:
self.grabbed = grabbed
self.frame = frame
def read(self):
while True:
with self.read_lock:
frame = self.frame.copy()
grabbed = self.grabbed
break
return grabbed, frame
def stop(self):
self.started = False
self.thread.join()
def __exit__(self, exec_type, exc_value, traceback):
self.cap.release()
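# Minimal usage sketch (assumes a camera is available at id 0; mirrors how
# cam_fomm.py drives this class):
#
#   cap = VideoCaptureAsync(0)
#   cap.start()              # spawns the grab thread and waits for the first frame
#   ret, frame = cap.read()  # returns a copy of the latest grabbed frame
#   cap.stop()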
| 2,107 | 27.876712 | 132 | py |
avatarify-python | avatarify-python-master/afy/predictor_worker.py | from predictor_local import PredictorLocal
from arguments import opt
from networking import SerializingContext, check_connection
from utils import Logger, TicToc, AccumDict, Once
import cv2
import numpy as np
import zmq
import msgpack
import msgpack_numpy as m
m.patch()
import queue
import multiprocessing as mp
import traceback
import time
PUT_TIMEOUT = 1 # s
GET_TIMEOUT = 1 # s
RECV_TIMEOUT = 1000 # ms
QUEUE_SIZE = 100
# class PredictorLocal():
# def __init__(self, *args, **kwargs):
# pass
# def __getattr__(self, *args, **kwargs):
# return lambda *args, **kwargs: None
class PredictorWorker():
def __init__(self, in_port=None, out_port=None):
self.recv_queue = mp.Queue(QUEUE_SIZE)
self.send_queue = mp.Queue(QUEUE_SIZE)
self.worker_alive = mp.Value('i', 0)
self.recv_process = mp.Process(target=self.recv_worker, args=(in_port, self.recv_queue, self.worker_alive))
self.predictor_process = mp.Process(target=self.predictor_worker, args=(self.recv_queue, self.send_queue, self.worker_alive))
self.send_process = mp.Process(target=self.send_worker, args=(out_port, self.send_queue, self.worker_alive))
def run(self):
self.worker_alive.value = 1
self.recv_process.start()
self.predictor_process.start()
self.send_process.start()
try:
self.recv_process.join()
self.predictor_process.join()
self.send_process.join()
except KeyboardInterrupt:
pass
@staticmethod
def recv_worker(port, recv_queue, worker_alive):
timing = AccumDict()
log = Logger('./var/log/recv_worker.log', verbose=opt.verbose)
ctx = SerializingContext()
socket = ctx.socket(zmq.PULL)
socket.bind(f"tcp://*:{port}")
socket.RCVTIMEO = RECV_TIMEOUT
log(f'Receiving on port {port}', important=True)
try:
while worker_alive.value:
tt = TicToc()
try:
tt.tic()
msg = socket.recv_data()
timing.add('RECV', tt.toc())
except zmq.error.Again:
log("recv timeout")
continue
#log('recv', msg[0])
method, data = msg
if method['critical']:
recv_queue.put(msg)
else:
try:
recv_queue.put(msg, block=False)
except queue.Full:
log('recv_queue full')
Once(timing, log, per=1)
except KeyboardInterrupt:
log("recv_worker: user interrupt", important=True)
worker_alive.value = 0
log("recv_worker exit", important=True)
@staticmethod
def predictor_worker(recv_queue, send_queue, worker_alive):
predictor = None
predictor_args = ()
timing = AccumDict()
log = Logger('./var/log/predictor_worker.log', verbose=opt.verbose)
try:
while worker_alive.value:
tt = TicToc()
try:
method, data = recv_queue.get(timeout=GET_TIMEOUT)
except queue.Empty:
continue
# get the latest non-critical request from the queue
# don't skip critical request
while not recv_queue.empty() and not method['critical']:
log(f"skip {method}")
method, data = recv_queue.get()
log("working on", method)
try:
tt.tic()
if method['name'] == 'predict':
image = cv2.imdecode(np.frombuffer(data, dtype='uint8'), -1)
else:
args = msgpack.unpackb(data)
timing.add('UNPACK', tt.toc())
except ValueError:
log("Invalid Message", important=True)
continue
tt.tic()
if method['name'] == "hello":
result = "OK"
elif method['name'] == "__init__":
if args == predictor_args:
log("Same config as before... reusing previous predictor")
else:
del predictor
predictor_args = args
predictor = PredictorLocal(*predictor_args[0], **predictor_args[1])
log("Initialized predictor with:", predictor_args, important=True)
result = True
tt.tic() # don't account for init
elif method['name'] == 'predict':
assert predictor is not None, "Predictor was not initialized"
result = getattr(predictor, method['name'])(image)
else:
assert predictor is not None, "Predictor was not initialized"
result = getattr(predictor, method['name'])(*args[0], **args[1])
timing.add('CALL', tt.toc())
tt.tic()
if method['name'] == 'predict':
assert isinstance(result, np.ndarray), f'Expected np.ndarray, got {result.__class__}'
ret_code, data_send = cv2.imencode(".jpg", result, [int(cv2.IMWRITE_JPEG_QUALITY), opt.jpg_quality])
else:
data_send = msgpack.packb(result)
timing.add('PACK', tt.toc())
if method['critical']:
send_queue.put((method, data_send))
else:
try:
send_queue.put((method, data_send), block=False)
except queue.Full:
log("send_queue full")
pass
Once(timing, log, per=1)
except KeyboardInterrupt:
log("predictor_worker: user interrupt", important=True)
except Exception as e:
log("predictor_worker error", important=True)
traceback.print_exc()
worker_alive.value = 0
log("predictor_worker exit", important=True)
@staticmethod
def send_worker(port, send_queue, worker_alive):
timing = AccumDict()
log = Logger('./var/log/send_worker.log', verbose=opt.verbose)
ctx = SerializingContext()
socket = ctx.socket(zmq.PUSH)
socket.bind(f"tcp://*:{port}")
log(f'Sending on port {port}', important=True)
try:
while worker_alive.value:
tt = TicToc()
try:
method, data = send_queue.get(timeout=GET_TIMEOUT)
except queue.Empty:
log("send queue empty")
continue
# get the latest non-critical request from the queue
# don't skip critical request
while not send_queue.empty() and not method['critical']:
log(f"skip {method}")
method, data = send_queue.get()
log("sending", method)
tt.tic()
socket.send_data(method, data)
timing.add('SEND', tt.toc())
Once(timing, log, per=1)
except KeyboardInterrupt:
log("predictor_worker: user interrupt", important=True)
worker_alive.value = 0
log("send_worker exit", important=True)
def run_worker(in_port=None, out_port=None):
worker = PredictorWorker(in_port=in_port, out_port=out_port)
worker.run()
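# This entry point is used by afy/cam_fomm.py when the --is-worker flag is set;
# a minimal direct call (ports are the defaults from afy/arguments.py) would be:
#
#   run_worker(in_port=5557, out_port=5558)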
| 7,713 | 32.982379 | 133 | py |
avatarify-python | avatarify-python-master/afy/predictor_remote.py | from arguments import opt
from networking import SerializingContext, check_connection
from utils import Logger, TicToc, AccumDict, Once
import multiprocessing as mp
import queue
import cv2
import numpy as np
import zmq
import msgpack
import msgpack_numpy as m
m.patch()
PUT_TIMEOUT = 0.1 # s
GET_TIMEOUT = 0.1 # s
RECV_TIMEOUT = 1000 # ms
QUEUE_SIZE = 100
class PredictorRemote:
def __init__(self, *args, in_addr=None, out_addr=None, **kwargs):
self.in_addr = in_addr
self.out_addr = out_addr
self.predictor_args = (args, kwargs)
self.timing = AccumDict()
self.log = Logger('./var/log/predictor_remote.log', verbose=opt.verbose)
self.send_queue = mp.Queue(QUEUE_SIZE)
self.recv_queue = mp.Queue(QUEUE_SIZE)
self.worker_alive = mp.Value('i', 0)
self.send_process = mp.Process(
target=self.send_worker,
args=(self.in_addr, self.send_queue, self.worker_alive),
name="send_process"
)
self.recv_process = mp.Process(
target=self.recv_worker,
args=(self.out_addr, self.recv_queue, self.worker_alive),
name="recv_process"
)
self._i_msg = -1
def start(self):
self.worker_alive.value = 1
self.send_process.start()
self.recv_process.start()
self.init_remote_worker()
def stop(self):
self.worker_alive.value = 0
self.log("join worker processes...")
self.send_process.join(timeout=5)
self.recv_process.join(timeout=5)
self.send_process.terminate()
self.recv_process.terminate()
def init_remote_worker(self):
return self._send_recv_async('__init__', self.predictor_args, critical=True)
def __getattr__(self, attr):
is_critical = attr != 'predict'
return lambda *args, **kwargs: self._send_recv_async(attr, (args, kwargs), critical=is_critical)
def _send_recv_async(self, method, args, critical):
self._i_msg += 1
args, kwargs = args
tt = TicToc()
tt.tic()
if method == 'predict':
image = args[0]
assert isinstance(image, np.ndarray), 'Expected image'
ret_code, data = cv2.imencode(".jpg", image, [int(cv2.IMWRITE_JPEG_QUALITY), opt.jpg_quality])
else:
data = msgpack.packb((args, kwargs))
self.timing.add('PACK', tt.toc())
meta = {
'name': method,
'critical': critical,
'id': self._i_msg
}
self.log("send", meta)
if critical:
self.send_queue.put((meta, data))
while True:
meta_recv, data_recv = self.recv_queue.get()
if meta_recv == meta:
break
else:
try:
# TODO: find good timeout
self.send_queue.put((meta, data), timeout=PUT_TIMEOUT)
except queue.Full:
self.log('send_queue is full')
try:
meta_recv, data_recv = self.recv_queue.get(timeout=GET_TIMEOUT)
except queue.Empty:
self.log('recv_queue is empty')
return None
self.log("recv", meta_recv)
tt.tic()
if meta_recv['name'] == 'predict':
result = cv2.imdecode(np.frombuffer(data_recv, dtype='uint8'), -1)
else:
result = msgpack.unpackb(data_recv)
self.timing.add('UNPACK', tt.toc())
if opt.verbose:
Once(self.timing, per=1)
return result
@staticmethod
def send_worker(address, send_queue, worker_alive):
timing = AccumDict()
log = Logger('./var/log/send_worker.log', opt.verbose)
ctx = SerializingContext()
sender = ctx.socket(zmq.PUSH)
sender.connect(address)
log(f"Sending to {address}")
try:
while worker_alive.value:
tt = TicToc()
try:
msg = send_queue.get(timeout=GET_TIMEOUT)
except queue.Empty:
continue
tt.tic()
sender.send_data(*msg)
timing.add('SEND', tt.toc())
if opt.verbose:
Once(timing, log, per=1)
except KeyboardInterrupt:
log("send_worker: user interrupt")
finally:
worker_alive.value = 0
sender.disconnect(address)
sender.close()
ctx.destroy()
log("send_worker exit")
@staticmethod
def recv_worker(address, recv_queue, worker_alive):
timing = AccumDict()
log = Logger('./var/log/recv_worker.log')
ctx = SerializingContext()
receiver = ctx.socket(zmq.PULL)
receiver.connect(address)
receiver.RCVTIMEO = RECV_TIMEOUT
log(f"Receiving from {address}")
try:
while worker_alive.value:
tt = TicToc()
try:
tt.tic()
msg = receiver.recv_data()
timing.add('RECV', tt.toc())
except zmq.error.Again:
continue
try:
recv_queue.put(msg, timeout=PUT_TIMEOUT)
except queue.Full:
log('recv_queue full')
continue
if opt.verbose:
Once(timing, log, per=1)
except KeyboardInterrupt:
log("recv_worker: user interrupt")
finally:
worker_alive.value = 0
receiver.disconnect(address)
receiver.close()
ctx.destroy()
log("recv_worker exit")
| 5,767 | 27.413793 | 106 | py |
monodle | monodle-main/tools/get_data_distribution.py | import os, sys
import numpy as np
import matplotlib.pyplot as plt
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(os.path.dirname(BASE_DIR))
sys.path.append(ROOT_DIR)  # extend sys.path before importing from lib.*
from lib.datasets.kitti.kitti_utils import get_objects_from_label
def get_kitti_bev_distribution(root_dir='../../data',
split='trainval',
write_list=['Car']):
assert split in ['train', 'val', 'trainval', 'test']
split_dir = os.path.join(root_dir, 'KITTI', 'ImageSets', split + '.txt')
idx_list = [x.strip() for x in open(split_dir).readlines()]
data_dir = os.path.join(root_dir, 'KITTI', 'object', 'testing' if split == 'test' else 'training')
x_list = []
z_list = []
for idx in idx_list:
label_dir = os.path.join(data_dir, 'label_2')
label_file = os.path.join(label_dir, '%06d.txt' % int(idx))
assert os.path.exists(label_file)
objects = get_objects_from_label(label_file)
for obj in objects:
if obj.cls_type not in write_list: continue
# if obj.get_obj_level() != 1: continue # easy only
# if obj.get_obj_level() != 2: continue # moderate only
# if obj.get_obj_level() != 3: continue # hard only
# if obj.get_obj_level() != 4: continue # unknown only
x_list.append(obj.pos[0])
z_list.append(obj.pos[2])
z = np.array(z_list)
# print stats
print ('all samples:', len(z_list))
print ('samples > 60:', (z>60).sum())
print ('samples > 65:', (z>65).sum())
print ('samples < 5: ', (z<5).sum())
print ('samples < 10:', (z<10).sum())
print ('samples < 15:', (z<15).sum())
print ('samples < 20:', (z<20).sum())
print ('samples in [5,15]:', (z<15).sum() - (z<5).sum())
print ('samples in [10,20]:', (z<20).sum() - (z<10).sum())
# show distribution
plt.title(split)
plt.xlabel('x-value')
plt.ylabel('z-label')
plt.scatter(x_list, z_list, s=1)
plt.savefig('./bev.png', dpi=300)
if __name__ == '__main__':
get_kitti_bev_distribution() | 2,128 | 34.483333 | 102 | py |
monodle | monodle-main/tools/train_val.py | import os
import sys
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = os.path.dirname(BASE_DIR)
sys.path.append(ROOT_DIR)
import yaml
import argparse
import datetime
from lib.helpers.model_helper import build_model
from lib.helpers.dataloader_helper import build_dataloader
from lib.helpers.optimizer_helper import build_optimizer
from lib.helpers.scheduler_helper import build_lr_scheduler
from lib.helpers.trainer_helper import Trainer
from lib.helpers.tester_helper import Tester
from lib.helpers.utils_helper import create_logger
from lib.helpers.utils_helper import set_random_seed
parser = argparse.ArgumentParser(description='End-to-End Monocular 3D Object Detection')
parser.add_argument('--config', dest='config', help='settings of detection in yaml format')
parser.add_argument('-e', '--evaluate_only', action='store_true', default=False, help='evaluation only')
args = parser.parse_args()
def main():
assert (os.path.exists(args.config))
cfg = yaml.load(open(args.config, 'r'), Loader=yaml.Loader)
set_random_seed(cfg.get('random_seed', 444))
log_file = 'train.log.%s' % datetime.datetime.now().strftime('%Y%m%d_%H%M%S')
logger = create_logger(log_file)
# build dataloader
train_loader, test_loader = build_dataloader(cfg['dataset'])
# build model
model = build_model(cfg['model'])
if args.evaluate_only:
logger.info('################### Evaluation Only ##################')
tester = Tester(cfg=cfg['tester'],
model=model,
dataloader=test_loader,
logger=logger)
tester.test()
return
# build optimizer
optimizer = build_optimizer(cfg['optimizer'], model)
# build lr scheduler
lr_scheduler, warmup_lr_scheduler = build_lr_scheduler(cfg['lr_scheduler'], optimizer, last_epoch=-1)
logger.info('################### Training ##################')
logger.info('Batch Size: %d' % (cfg['dataset']['batch_size']))
logger.info('Learning Rate: %f' % (cfg['optimizer']['lr']))
trainer = Trainer(cfg=cfg['trainer'],
model=model,
optimizer=optimizer,
train_loader=train_loader,
test_loader=test_loader,
lr_scheduler=lr_scheduler,
warmup_lr_scheduler=warmup_lr_scheduler,
logger=logger)
trainer.train()
logger.info('################### Evaluation ##################' )
tester = Tester(cfg=cfg['tester'],
model=model,
dataloader=test_loader,
logger=logger)
tester.test()
if __name__ == '__main__':
main() | 2,753 | 32.585366 | 105 | py |