import json
import pickle
import sys
from datetime import datetime
from json import JSONEncoder
import numpy as np
import pandas as pd
import watchdog.events
import watchdog.observers
import time
import tensorflow as tf
import configparser
import os
from kafka import KafkaProducer
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
from tensorflow.python.keras.models import load_model
sys.path.append(sys.path[0] + '/..')
from mmt.readerMMT import eventsToFeatures
import warnings
warnings.filterwarnings('ignore')
conf_path = './config.config'
max_message_size = 104857600 #bytes
# ndarray json encoder
class NumpyArrayEncoder(JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
return JSONEncoder.default(self, obj)
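# Hedged usage sketch (illustrative only, not part of the original pipeline):
# shows how the encoder above turns an ndarray into a UTF-8 JSON payload, the
# same way the watchdog handler below serializes prediction results for Kafka.
# Relies only on the json/numpy imports already present in this module.
def _example_numpy_json_roundtrip():
    sample = np.arange(4, dtype=np.float32).reshape(2, 2)
    payload = json.dumps(sample, cls=NumpyArrayEncoder).encode('utf-8')
    # decoding gives back plain nested lists, not an ndarray
    return json.loads(payload.decode('utf-8'))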
# Kafka Producer
producer = KafkaProducer(bootstrap_servers=['localhost:9092'],max_request_size=max_message_size) # value_serializer=serializer
# Watchdog part for monitoring creation of csv files from mmt
class Handler(watchdog.events.PatternMatchingEventHandler):
def __init__(self):
# Watch for new csvs from mmt probe folder (./server/csv/)
watchdog.events.PatternMatchingEventHandler.__init__(self, patterns=['*_1__data.csv'],
## Monitoring csv report files (_1_) with names containing _data
ignore_directories=True, case_sensitive=False)
def on_closed(self, event):
print("Closing action on csv - % s." % event.src_path)
start_time = time.time()
mmt_csv = event.src_path
ips, x_features = eventsToFeatures(mmt_csv)
# if there are more ips than grouped samples from features (i.e. there is an ip but no features for the ip) -> we delete the ip from the ip list
ips = pd.merge(ips, x_features, how='inner', on=['ip.session_id', 'meta.direction'])
ips = ips[['ip.session_id', 'meta.direction', 'ip']]
x_features.drop(columns=['ip.session_id', 'meta.direction'], inplace=True)
print("Prediction - test")
# rescaling with scaler used with trained model
x_test = np.asarray(x_features, np.float32)
x_test = scaler.transform(x_test)
# prediction
y_pred = model.predict(x_test)
y_pred = np.transpose(np.round(y_pred)).reshape(y_pred.shape[0], )
preds = np.array([y_pred]).T
# adding predictions to features as last column
res = np.append(x_features, preds, axis=1)
res = np.append(ips, res, axis=1)
# print(res.nbytes)
# results json encoding
j_res = json.dumps(res, cls=NumpyArrayEncoder).encode('utf-8')
print(f'Producing message @ {datetime.now()} | Message') # = {str(j_res)}')
psend = producer.send('predictions', j_res)
# print(psend)
producer.flush()
# pd.DataFrame(res).to_csv(f"{predictions_dir}predictions_{classification_id}.csv", index=False,
# header=prediction_names)
print("--- %s seconds ---" % (time.time() - start_time))
y_pred = None
res = None
x_features = None
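# Hedged illustration with toy data (not real mmt output): the inner merge in
# Handler.on_closed keeps only the (session, direction) keys present in both
# frames, so an IP without aggregated features is dropped and the remaining
# rows stay aligned with the feature matrix that predictions are appended to.
def _example_inner_merge_alignment():
    ips = pd.DataFrame({'ip.session_id': [1, 2, 3],
                        'meta.direction': [0, 0, 1],
                        'ip': ['10.0.0.1', '10.0.0.2', '10.0.0.3']})
    feats = pd.DataFrame({'ip.session_id': [1, 3],
                          'meta.direction': [0, 1],
                          'f0': [0.5, 0.7]})
    merged = pd.merge(ips, feats, how='inner', on=['ip.session_id', 'meta.direction'])
    return merged[['ip.session_id', 'meta.direction', 'ip']]  # session 2 is dropped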
if __name__ == "__main__":
config = configparser.ConfigParser()
config.read(conf_path)
mmt_csv_dir = config['DEFAULT']['mmt_probe_csv_dir']
model_path = config['DEFAULT']['model_path']
scaler_path = config['DEFAULT']['scaler_path']
print(f'{mmt_csv_dir},{model_path},{scaler_path}')
if not mmt_csv_dir or not model_path or not scaler_path:
exit('Config does not contain all needed paths')
print("Loading model...")
model = load_model(model_path)
print("Model loaded.\nLoading scaler...")
scaler = pickle.load(open(scaler_path, 'rb')) # "./saved_scalers/scaler_2022-03-02_10-37-27.pkl"
print("Scaler loaded.")
res=np.ndarray(shape=(2,2), dtype=float, order='F')
j_res = json.dumps(res, cls=NumpyArrayEncoder).encode('utf-8')
print(f'Producing message @ {datetime.now()} | Message') # = {str(j_res)}')
asd = producer.send('messages', j_res)
# asd = producer.send('messages', 'j_res')
print(asd)
producer.flush()
event_handler = Handler()
observer = watchdog.observers.Observer()
print("Starting watchdog.")
observer.schedule(event_handler, path=mmt_csv_dir, recursive=True)
observer.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
observer.join()
| {"hexsha": "430bf6d76f7cc6e2b878aef77405c75d0b53be3e", "size": 4546, "ext": "py", "lang": "Python", "max_stars_repo_path": "continuous_module/observer.py", "max_stars_repo_name": "Montimage/acas", "max_stars_repo_head_hexsha": "49c345cee5eabbda4833119de5403316139031b5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "continuous_module/observer.py", "max_issues_repo_name": "Montimage/acas", "max_issues_repo_head_hexsha": "49c345cee5eabbda4833119de5403316139031b5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "continuous_module/observer.py", "max_forks_repo_name": "Montimage/acas", "max_forks_repo_head_hexsha": "49c345cee5eabbda4833119de5403316139031b5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.515625, "max_line_length": 148, "alphanum_fraction": 0.6570611527, "include": true, "reason": "import numpy", "num_tokens": 1040} |
import logging
import os
import shutil
import tempfile
from urllib import request as request
import numpy as np
from ase import Atoms
import schnetpack as spk
from schnetpack.data import AtomsDataError
from schnetpack.datasets import DownloadableAtomsData
__all__ = ["MD17"]
class MD17(DownloadableAtomsData):
"""
MD17 benchmark data set for molecular dynamics of small molecules
containing molecular forces.
Args:
dbpath (str): path to database
molecule (str): Name of molecule to load into database. Allowed are:
aspirin
azobenzene
benzene
ethanol
malonaldehyde
naphthalene
paracetamol
salicylic_acid
toluene
uracil
subset (list): indices of subset. Set to None for entire dataset
(default: None)
download (bool): set true if dataset should be downloaded
(default: True)
collect_triples (bool): set true if triples for angular functions
should be computed (default: False)
load_only (list, optional): reduced set of properties to be loaded
environment_provider (spk.environment.BaseEnvironmentProvider): define how
neighborhood is calculated
(default=spk.environment.SimpleEnvironmentProvider).
See: http://quantum-machine.org/datasets/
"""
energy = "energy"
forces = "forces"
datasets_dict = dict(
aspirin="aspirin_dft.npz",
# aspirin_ccsd='aspirin_ccsd.zip',
azobenzene="azobenzene_dft.npz",
benzene="benzene_dft.npz",
ethanol="ethanol_dft.npz",
# ethanol_ccsdt='ethanol_ccsd_t.zip',
malonaldehyde="malonaldehyde_dft.npz",
# malonaldehyde_ccsdt='malonaldehyde_ccsd_t.zip',
naphthalene="naphthalene_dft.npz",
paracetamol="paracetamol_dft.npz",
salicylic_acid="salicylic_dft.npz",
toluene="toluene_dft.npz",
# toluene_ccsdt='toluene_ccsd_t.zip',
uracil="uracil_dft.npz",
)
existing_datasets = datasets_dict.keys()
def __init__(
self,
dbpath,
molecule=None,
subset=None,
download=True,
collect_triples=False,
load_only=None,
environment_provider=spk.environment.SimpleEnvironmentProvider(),
):
if not os.path.exists(dbpath) and molecule is None:
raise AtomsDataError("Provide a valid dbpath or select desired molecule!")
if molecule is not None and molecule not in MD17.datasets_dict.keys():
raise AtomsDataError("Molecule {} is not supported!".format(molecule))
self.molecule = molecule
available_properties = [MD17.energy, MD17.forces]
super(MD17, self).__init__(
dbpath=dbpath,
subset=subset,
load_only=load_only,
collect_triples=collect_triples,
download=download,
available_properties=available_properties,
environment_provider=environment_provider,
)
def create_subset(self, idx):
idx = np.array(idx)
subidx = idx if self.subset is None else np.array(self.subset)[idx]
return MD17(
dbpath=self.dbpath,
molecule=self.molecule,
subset=subidx,
download=False,
collect_triples=self.collect_triples,
load_only=self.load_only,
environment_provider=self.environment_provider,
)
def _download(self):
logging.info("Downloading {} data".format(self.molecule))
tmpdir = tempfile.mkdtemp("MD")
rawpath = os.path.join(tmpdir, self.datasets_dict[self.molecule])
url = (
"http://www.quantum-machine.org/gdml/data/npz/"
+ self.datasets_dict[self.molecule]
)
request.urlretrieve(url, rawpath)
logging.info("Parsing molecule {:s}".format(self.molecule))
data = np.load(rawpath)
numbers = data["z"]
atoms_list = []
properties_list = []
for positions, energies, forces in zip(data["R"], data["E"], data["F"]):
properties_list.append(dict(energy=energies, forces=forces))
atoms_list.append(Atoms(positions=positions, numbers=numbers))
self.add_systems(atoms_list, properties_list)
self.update_metadata(dict(data_source=self.datasets_dict[self.molecule]))
logging.info("Cleanining up the mess...")
logging.info("{} molecule done".format(self.molecule))
shutil.rmtree(tmpdir)
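# Hedged sketch (plain numpy, no database access, hypothetical helper): the
# index composition used by MD17.create_subset above resolves a request for
# rows [0, 2] of an existing subset [10, 20, 30] to the original indices
# [10, 30], so nested subsets always refer back to the full dataset.
def _example_subset_composition():
    parent_subset = np.array([10, 20, 30])
    idx = np.array([0, 2])
    return parent_subset[idx]  # -> array([10, 30])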
| {"hexsha": "4c4d16126a77c3bb2b23ff7a7c51b71f3c5a6722", "size": 4695, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/schnetpack/datasets/md17.py", "max_stars_repo_name": "giadefa/schnetpack", "max_stars_repo_head_hexsha": "9dabc3b6e3b28deb2fb3743ea1857c46b055efbf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-02-21T05:38:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-06T22:02:40.000Z", "max_issues_repo_path": "src/schnetpack/datasets/md17.py", "max_issues_repo_name": "giadefa/schnetpack", "max_issues_repo_head_hexsha": "9dabc3b6e3b28deb2fb3743ea1857c46b055efbf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2020-06-28T09:47:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T10:26:33.000Z", "max_forks_repo_path": "src/schnetpack/datasets/md17.py", "max_forks_repo_name": "giadefa/schnetpack", "max_forks_repo_head_hexsha": "9dabc3b6e3b28deb2fb3743ea1857c46b055efbf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-11-06T22:02:43.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-28T13:52:22.000Z", "avg_line_length": 32.8321678322, "max_line_length": 86, "alphanum_fraction": 0.6191693291, "include": true, "reason": "import numpy", "num_tokens": 1027} |
function create_geostationary_granule(
target_lon::Real,
target_lat::Real,
central_longitude::Real,
N_step=35,
N_pixel=1016,
x_space=5400, # footprint size E-W in m
y_space=2700 # footprint size N-S in m
)
N = N_step * N_pixel
# lon/lat projection object
wgs84 = Projection("+proj=longlat +datum=WGS84 +no_defs")
# geostationary projection
geo_proj = Projection("+proj=geos +lon_0=$(central_longitude) +h=35785831.0 +sweep=y")
# Produce a rectangular grid given the parameters
c_x = 0.0
c_y = 0.0
try
c_x, c_y = Proj4.transform(wgs84, geo_proj, [target_lon, target_lat])
catch
return nothing
end
# Move up to the "top left" scene of the grid
topleft_x = c_x + (N_step - 1) / 2 * x_space
topleft_y = c_y + (N_pixel - 1) / 2 * y_space
x_coords = Float64[]
y_coords = Float64[]
dts = DateTime[]
for xstep in 0:N_step-1
for ystep in 0:N_pixel-1
this_x = topleft_x - x_space * xstep
this_y = topleft_y - y_space * ystep
push!(x_coords, this_x)
push!(y_coords, this_y)
end
end
lons = Float64[]
lats = Float64[]
for i in 1:N
this_lon = NaN
this_lat = NaN
try
this_lon, this_lat =
Proj4.transform(geo_proj, wgs84, [x_coords[i], y_coords[i]])
catch
end
push!(lons, this_lon)
push!(lats, this_lat)
end
locarray = Geolocation[]
# Produce the location array
# These are the full granule!
for i in 1:N
this_loc = Geolocation(lons[i] ,lats[i])
push!(locarray, this_loc)
end
return geo_proj, locarray
end
function calculate_geocarb_uncertainty(continuum::Real)
#a = 833661.0
#b = 4109.0
# THIS IS RELATIVE UNCERTAINTY!
# To get absolute uncertainty for 757nm you must multiply
# by the reflected continuum level radiance!
#return 1.0 / sqrt(a * albedo * cos(deg2rad(sza)) - b)
a = 0.0004
b = 0.0016
return sqrt(a + continuum * b)
end
function GeostationaryIntensiveSampling(
radius::Real,
target_lon::Real,
target_lat::Real,
central_longitude::Real,
start_time::DateTime,
end_time::DateTime;
N_step=35,
N_pixel=1016,
x_space=5400, # footprint size E-W in m
y_space=2700, # footprint size N-S in m
stare_time=Dates.Second(9) + Dates.Millisecond(600) # How long does one frame take?
)
N = N_step * N_pixel
instrument = "Geostationary $(central_longitude)"
# This would make conversions from compound periods possible
stare_time_period = Dates.Millisecond(Dates.toms(stare_time))
geo_proj, locarray = create_geostationary_granule(
target_lon,
target_lat,
central_longitude,
N_step,
N_pixel,
x_space,
y_space
)
lons = (p -> p.lon).(locarray)
lats = (p -> p.lat).(locarray)
lon_bound_min = minimum(lons)
lon_bound_max = maximum(lons)
lat_bound_min = minimum(lats)
lat_bound_max = maximum(lats)
vnp_sd = create_VNP_SD_from_locbounds(
lon_bound_min,
lat_bound_min,
lon_bound_max,
lat_bound_max
)
scenearray = Scene[]
# repeat the loop over all locations as long as the measurement
# time stays within the stated limit..
current_frame_time = start_time
current_granule = 1
current_frame = 1
while current_frame_time <= end_time
#println(current_frame, "/", current_granule)
for i in 1:N_pixel
# Grab the location corresponding to pixel/step
this_loc = locarray[i + (current_frame - 1) * N_pixel]
this_loctime = GeolocationTime(
this_loc,
current_frame_time
)
# #############################
# Calculate viewing zenith here
# #############################
#
# Position of scene in ECEF
r_location = geodetic_to_ecef(this_loc.lon, this_loc.lat, 0.0)
# Position of satellite in ECEF
r_satellite = geodetic_to_ecef(central_longitude, 0.0, 0.0)
# Normalized location
r_norm = normalize(r_location)
r_loc_to_sat_norm = normalize(r_satellite - r_location)
_tmp = dot(r_loc_to_sat_norm, r_norm)
if (_tmp > 1) & (_tmp < 1 + 1e-6)
_tmp = 1.0
end
this_vza = rad2deg(acos(_tmp))
this_scene = create_SIF_scene(
instrument,
"N/A",
this_loctime,
this_vza,
calculate_geocarb_uncertainty,
vnp_sd
)
push!(scenearray, this_scene)
end
if current_frame < N_step
current_frame += 1
else
current_frame = 1
current_granule += 1
end
current_frame_time += stare_time_period
end
N_scene = length(scenearray)
# Once the full scenes have been done, we need to subset
# to the radius given by the user. The reason why we can
# do this only AFTER all the scenes have been calculated,
# is that we must have scenes corresponding to some real
# scanning operation, and can only filter afterwards.
radius_mask = Int[]
for i in 1:N_scene
if check_location_within_radius(target_lon, target_lat,
radius, scenearray[i].loctime)
push!(radius_mask, i)
end
end
# Match the location array with the locations found in the
# scenes. Drop any locations that don't appear in scenes.
new_locarray = unique((p -> p.loctime.loc).(scenearray))
info = "Geostationary intensive sampling at $(target_lon), $(target_lat)"
return GeostationaryIntensiveSampling(
info,
[instrument],
new_locarray,
scenearray[radius_mask]
)
end
| {"hexsha": "59cc25bae56044bdf2ee1e88cd27b32bb3239b90", "size": 6048, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Geostationary.jl", "max_stars_repo_name": "PeterSomkuti/jLRS-v1", "max_stars_repo_head_hexsha": "aa08479e3cf4bd855906879fd4c4239cc2ffe988", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Geostationary.jl", "max_issues_repo_name": "PeterSomkuti/jLRS-v1", "max_issues_repo_head_hexsha": "aa08479e3cf4bd855906879fd4c4239cc2ffe988", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Geostationary.jl", "max_forks_repo_name": "PeterSomkuti/jLRS-v1", "max_forks_repo_head_hexsha": "aa08479e3cf4bd855906879fd4c4239cc2ffe988", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.6857142857, "max_line_length": 90, "alphanum_fraction": 0.5924272487, "num_tokens": 1599} |
module Units
export second, minute, hour, day, year, meter, kilometer,
seconds, minutes, hours, days, years, meters, kilometers,
KiB, MiB, GiB, TiB
#####
##### Convenient definitions
#####
"""
second
A `Float64` constant equal to 1.0. Useful for increasing the clarity of scripts, e.g. `Δt = 1second`.
"""
const second = 1.0
"""
seconds
A `Float64` constant equal to 1.0. Useful for increasing the clarity of scripts, e.g. `Δt = 7seconds`.
"""
const seconds = second
"""
minute
A `Float64` constant equal to 60`seconds`. Useful for increasing the clarity of scripts, e.g. `Δt = 1minute`.
"""
const minute = 60seconds
"""
minutes
A `Float64` constant equal to 60`seconds`. Useful for increasing the clarity of scripts, e.g. `Δt = 15minutes`.
"""
const minutes = minute
"""
hour
A `Float64` constant equal to 60`minutes`. Useful for increasing the clarity of scripts, e.g. `Δt = 1hour`.
"""
const hour = 60minutes
"""
hours
A `Float64` constant equal to 60`minutes`. Useful for increasing the clarity of scripts, e.g. `Δt = 3hours`.
"""
const hours = hour
"""
day
A `Float64` constant equal to 24`hours`. Useful for increasing the clarity of scripts, e.g. `stop_time = 1day`.
"""
const day = 24hours
"""
days
A `Float64` constant equal to 24`hours`. Useful for increasing the clarity of scripts, e.g. `stop_time = 7days`.
"""
const days = day
"""
year
A `Float64` constant equal to 365`days`. Useful for increasing the clarity of scripts, e.g. `stop_time = 1year`.
"""
const year = 365days
"""
years
A `Float64` constant equal to 365`days`. Useful for increasing the clarity of scripts, e.g. `stop_time = 100years`.
"""
const years = year
"""
meter
A `Float64` constant equal to 1.0. Useful for increasing the clarity of scripts, e.g. `Lx = 1meter`.
"""
const meter = 1.0
"""
meters
A `Float64` constant equal to 1.0. Useful for increasing the clarity of scripts, e.g. `Lx = 50meters`.
"""
const meters = meter
"""
kilometer
A `Float64` constant equal to 1000`meters`. Useful for increasing the clarity of scripts, e.g. `Lx = 1kilometer`.
"""
const kilometer = 1000meters
"""
kilometers
A `Float64` constant equal to 1000`meters`. Useful for increasing the clarity of scripts, e.g. `Lx = 5000kilometers`.
"""
const kilometers = kilometer
"""
KiB
A `Float64` constant equal to 1024.0. Useful for increasing the clarity of scripts, e.g. `max_filesize = 250KiB`.
"""
const KiB = 1024.0
"""
MiB
A `Float64` constant equal to 1024`KiB`. Useful for increasing the clarity of scripts, e.g. `max_filesize = 100MiB`.
"""
const MiB = 1024KiB
"""
GiB
A `Float64` constant equal to 1024`MiB`. Useful for increasing the clarity of scripts, e.g. `max_filesize = 50GiB`.
"""
const GiB = 1024MiB
"""
TiB
A `Float64` constant equal to 1024`GiB`. Useful for increasing the clarity of scripts, e.g. `max_filesize = 2TiB`.
"""
const TiB = 1024GiB
end
| {"hexsha": "573ca5fc3af3115abb164c589203fb951d2066b3", "size": 2954, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Units.jl", "max_stars_repo_name": "charleskawczynski/Oceananigans.jl", "max_stars_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 393, "max_stars_repo_stars_event_min_datetime": "2020-05-07T19:20:35.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T02:09:52.000Z", "max_issues_repo_path": "src/Units.jl", "max_issues_repo_name": "charleskawczynski/Oceananigans.jl", "max_issues_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1408, "max_issues_repo_issues_event_min_datetime": "2020-05-09T11:39:27.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T23:52:36.000Z", "max_forks_repo_path": "src/Units.jl", "max_forks_repo_name": "charleskawczynski/Oceananigans.jl", "max_forks_repo_head_hexsha": "c34e6cd2166bbaa057186ffa795d348c1802485f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 99, "max_forks_repo_forks_event_min_datetime": "2020-05-10T02:59:08.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T21:50:55.000Z", "avg_line_length": 21.4057971014, "max_line_length": 117, "alphanum_fraction": 0.6763710223, "num_tokens": 886} |
#!/usr/bin/python3
import csv
import numpy as np
import matplotlib.pyplot as plt
def parsecsvdata():
'''returns a list. [0] is LAN data and [1] is WAN data'''
summary = [] # list that will contain [(LAN), (WAN)]
#open csv data
with open("/home/student/mikerauer/graphing/2018summary.csv",\
"r") as downtime:
# parse csv data with csv.reader
downdata = csv.reader(downtime, delimiter=",")
for row in downdata:
rowdat = (row[0], row[1], row[2], row[3])
summary.append(rowdat) # add tuple to list
return summary
def main():
N = 4
summary = parsecsvdata() # grabs data
localnetMeans = summary[0] #LAN Length of outage (mins)
wanMeans = summary[1] #WAN length of outage (min)
ind = np.arange(N) # the x locations for the groups
# the width of the bars: can also be len(x) sequence
width = 0.35
#describe where to display p1
p1 = plt.bar(ind, localnetMeans, width)
#stack p2 on top of p1
p2 = plt.bar(ind, wanMeans, width, bottom=localnetMeans)
#Describe the table metadata
plt.ylabel("Length of Outage (mins)")
plt.title("2018 Network Summary")
plt.xticks(ind, ("Q1", "Q2", "Q3", "Q4"))
plt.yticks(np.arange(0, 81, 10))
plt.legend((p1[0], p2[0]), ("LAN", "WAN"))
#display graph
#plt.show()
#Save the graph
plt.savefig\
("/home/student/mikerauer/graphing/2018summary2.png")
print("graph created")
main()
| {"hexsha": "a419e39dde0c4aeecf5b04a8560e22b3aa38e2a1", "size": 1469, "ext": "py", "lang": "Python", "max_stars_repo_path": "graphing/gramaker-csvreader.py", "max_stars_repo_name": "mikerauer/pyb-class", "max_stars_repo_head_hexsha": "b7f6202c58df654eb81263d12c2634fa37a27e07", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "graphing/gramaker-csvreader.py", "max_issues_repo_name": "mikerauer/pyb-class", "max_issues_repo_head_hexsha": "b7f6202c58df654eb81263d12c2634fa37a27e07", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "graphing/gramaker-csvreader.py", "max_forks_repo_name": "mikerauer/pyb-class", "max_forks_repo_head_hexsha": "b7f6202c58df654eb81263d12c2634fa37a27e07", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.7169811321, "max_line_length": 66, "alphanum_fraction": 0.617426821, "include": true, "reason": "import numpy", "num_tokens": 433} |
# Copyright (c) 2019, NVIDIA Corporation. All rights reserved.
#
# This work is made available under the Nvidia Source Code License-NC.
# To view a copy of this license, visit
# https://nvlabs.github.io/stylegan2/license.html
import json
import argparse
import numpy as np
import dnnlib
import dnnlib.tflib as tflib
import re
import sys
import os
import csv
import math
import projector
import pretrained_networks
from training import dataset
from training import misc
#----------------------------------------------------------------------------
def project_image(proj, targets, png_prefix, num_snapshots):
snapshot_steps = set(proj.num_steps - np.linspace(0, proj.num_steps, num_snapshots, endpoint=False, dtype=int))
misc.save_image_grid(targets, png_prefix + 'target.png', drange=[-1,1])
proj.start(targets)
while proj.get_cur_step() < proj.num_steps:
print('\r%d / %d ... ' % (proj.get_cur_step(), proj.num_steps), end='', flush=True)
proj.step()
if proj.get_cur_step() in snapshot_steps:
misc.save_image_grid(proj.get_images(), png_prefix + 'step%04d.png' % proj.get_cur_step(), drange=[-1,1])
print('\r%-30s\r' % '', end='', flush=True)
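# Hedged illustration (pure numpy, no projector needed): how snapshot_steps is
# derived in project_image above. With proj.num_steps=1000 and num_snapshots=5
# the saved steps are spread evenly and always include the final step.
def _example_snapshot_steps(num_steps=1000, num_snapshots=5):
    steps = set(num_steps - np.linspace(0, num_steps, num_snapshots, endpoint=False, dtype=int))
    return sorted(steps)  # -> [200, 400, 600, 800, 1000]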
#----------------------------------------------------------------------------
def project_generated_images(submit_config, network_pkl, seeds, num_snapshots, truncation_psi):
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
proj = projector.Projector()
proj.verbose = submit_config.verbose
proj.set_network(Gs)
noise_vars = [var for name, var in Gs.components.synthesis.vars.items() if name.startswith('noise')]
Gs_kwargs = dnnlib.EasyDict()
Gs_kwargs.randomize_noise = False
Gs_kwargs.truncation_psi = truncation_psi
for seed_idx, seed in enumerate(seeds):
print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:])
tflib.set_vars({var: rnd.randn(*var.shape.as_list()) for var in noise_vars})
images = Gs.run(z, None, **Gs_kwargs)
project_image(proj, targets=images, png_prefix=dnnlib.make_run_dir_path('seed%04d-' % seed), num_snapshots=num_snapshots)
#----------------------------------------------------------------------------
def project_real_images(submit_config, network_pkl, dataset_name, data_dir, num_images, num_snapshots):
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
proj = projector.Projector()
proj.verbose = submit_config.verbose
proj.set_network(Gs)
print('Loading images from "%s"...' % dataset_name)
dataset_obj = dataset.load_dataset(data_dir=data_dir, tfrecord_dir=dataset_name, max_label_size=0, repeat=False, shuffle_mb=0)
print('dso shape: ' + str(dataset_obj.shape) + ' vs gs shape: ' + str(Gs.output_shape[1:]))
assert dataset_obj.shape == Gs.output_shape[1:]
for image_idx in range(num_images):
print('Projecting image %d/%d ...' % (image_idx, num_images))
images, _labels = dataset_obj.get_minibatch_np(1)
images = misc.adjust_dynamic_range(images, [0, 255], [-1, 1])
project_image(proj, targets=images, png_prefix=dnnlib.make_run_dir_path('image%04d-' % image_idx), num_snapshots=num_snapshots)
#----------------------------------------------------------------------------
def generate_grid_of_variants(submit_config, network_pkl, truncation_psi, latents_file):
print('starting process of generating grid of variants of ' + latents_file)
tflib.init_tf({'rnd.np_random_seed': 1000})
f = open(latents_file, 'r')
original_latents = np.array(json.load(f))
f.close()
print('loaded original latents from ' + latents_file)
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
grid_size = (32, 1)
grid_labels = []
grid_latents = np.ndarray(shape=(grid_size[0]*grid_size[1],512))
for i in range(grid_size[0] * grid_size[1]):
grid_latents[i] = original_latents
grid_fakes = Gs.run(grid_latents, grid_labels, is_validation=True, minibatch_size=4)
misc.save_image_grid(grid_fakes, dnnlib.make_run_dir_path('latentmod-1.png'), drange=[-1,1], grid_size=grid_size)
def generate_mutated_grid(submit_config, network_pkl, truncation_psi, latents_file, minibatch_size=4):
print('starting process of generating grid of variants of ' + latents_file)
tflib.init_tf({'rnd.np_random_seed': 1000})
grid_size = (128, 1)
grid_labels = []
f = open(latents_file, 'r')
original_latents = np.array(json.load(f))
f.close()
print('loaded original latents from ' + latents_file)
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
w_avg = Gs.get_var('dlatent_avg') # [component]
Gs_syn_kwargs = dnnlib.EasyDict()
Gs_syn_kwargs.randomize_noise = False
Gs_syn_kwargs.minibatch_size = minibatch_size
all_latents = []
ltnts = original_latents[0]
print('Generating W vectors...')
for i in range(grid_size[0]*grid_size[1]):
ltnts = mutate_latents(ltnts, 4)
all_latents.append(ltnts)
all_z = np.stack(all_latents)
# all_z = np.stack([mutate_latents(original_latents[0], i) for i in range(grid_size[0]*grid_size[1])])
all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]
all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]
print('Generating images...')
all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]
misc.save_image_grid(all_images, dnnlib.make_run_dir_path('latentmod-1.png'), drange=[-1,1], grid_size=grid_size)
def mutate_latents(latents, num_mutations):
ltnts = np.array(latents)
for i in range(num_mutations):
index = math.floor(np.random.random()*len(latents))
# increment = ((np.random.random()*3)-1)
increment = np.random.random()
print('chosen index is ' + str(index) + ', adding ' + str(increment))
ltnts[index] += increment
return ltnts
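# Hedged standalone variant of mutate_latents above (fixed seed, toy vector):
# a handful of random positions get a uniform [0, 1) increment added, while
# the input vector itself is left untouched because it is copied first.
def _example_mutation(seed=0, num_mutations=3):
    rnd = np.random.RandomState(seed)
    base = np.zeros(8)
    mutated = base.copy()
    for _ in range(num_mutations):
        index = rnd.randint(len(mutated))
        mutated[index] += rnd.random_sample()
    return base, mutated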
def generate_interpolation_between(submit_config, network_pkl, truncation_psi, latents_file_start, latents_file_end, num_steps, minibatch_size=4):
print('starting process of generating interpolation between ' + latents_file_start + ' and ' + latents_file_end)
tflib.init_tf({'rnd.np_random_seed': 1000})
grid_size = (num_steps, 1)
grid_labels = []
f = open(latents_file_start, 'r')
start_original_latents = np.array(json.load(f))
f.close()
print('loaded start latents from ' + latents_file_start)
f = open(latents_file_end, 'r')
end_original_latents = np.array(json.load(f))
f.close()
print('loaded end latents from ' + latents_file_end)
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
w_avg = Gs.get_var('dlatent_avg') # [component]
Gs_syn_kwargs = dnnlib.EasyDict()
Gs_syn_kwargs.randomize_noise = False
Gs_syn_kwargs.minibatch_size = minibatch_size
all_latents = []
start_ltnts = start_original_latents[0]
end_ltnts = end_original_latents[0]
ltnt = np.array(start_ltnts)
each_step_increment = [((end_ltnts[i] - start_ltnts[i]) / num_steps) for i in range(len(start_ltnts))] #np.zeros(len(start_ltnts))
print(str(each_step_increment))
print('Generating W vectors...')
for i in range(grid_size[0]*grid_size[1]):
for inc_index in range(len(each_step_increment)):
ltnt[inc_index] += each_step_increment[inc_index]
all_latents.append(ltnt)
ltnt = np.array(ltnt)
all_z = np.stack(all_latents)
# all_z = np.stack([mutate_latents(original_latents[0], i) for i in range(grid_size[0]*grid_size[1])])
all_w = Gs.components.mapping.run(all_z, None) # [minibatch, layer, component]
all_w = w_avg + (all_w - w_avg) * truncation_psi # [minibatch, layer, component]
print('Generating images...')
all_images = Gs.components.synthesis.run(all_w, **Gs_syn_kwargs) # [minibatch, height, width, channel]
misc.save_image_grid(all_images, dnnlib.make_run_dir_path('interpolation.png'), drange=[-1,1], grid_size=grid_size)
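# Hedged numpy-only sketch of the interpolation math above: the per-step
# increment is (end - start) / num_steps for every latent component, and the
# loop adds the increment before appending, so the emitted points run from
# start + step up to (and including) end.
def _example_latent_interpolation(num_steps=4):
    start = np.array([0.0, 10.0])
    end = np.array([1.0, 20.0])
    step = (end - start) / num_steps
    return np.stack([start + step * (i + 1) for i in range(num_steps)])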
def get_latents_for_seeds(submit_config, network_pkl, seeds):
print('starting process of getting latents for seeds ' + str(seeds))
tflib.init_tf({'rnd.np_random_seed': 1000})
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
for seed_idx, seed in enumerate(seeds):
print('Projecting seed %d (%d/%d) ...' % (seed, seed_idx, len(seeds)))
rnd = np.random.RandomState(seed)
z = rnd.randn(1, *Gs.input_shape[1:])
f = open(dnnlib.make_run_dir_path(str(seed)+'.json'), 'w')
json.dump(z.tolist(), f)
f.close()
def find_common_latents(submit_config, network_pkl, input_dir):
print('starting process of finding common latents in directory ' + input_dir)
tflib.init_tf({'rnd.np_random_seed': 1000})
print('Loading networks from "%s"...' % network_pkl)
_G, _D, Gs = pretrained_networks.load_networks(network_pkl)
# parse the seeds out of the filenames in the input directory
seeds = []
seed_re = re.compile(r'^seed(\d+)\.png')
for path, dirs, files in os.walk(input_dir):
matches = [seed_re.match(fn) for fn in files]
seeds += [match.group(1) for match in matches]
# get latents for each seed in the list
latents = {}
print('operating on seeds: ' + str(seeds))
for seed_idx, seed in enumerate(seeds):
print('Projecting seed %s ...' % (seed))
rnd = np.random.RandomState(int(seed))
z = rnd.randn(1, *Gs.input_shape[1:])
latents[seed] = z
# f = open(dnnlib.make_run_dir_path(str(seed)+'.json'), 'w')
# json.dump(z.tolist(), f)
# f.close()
# compute average for each latent across all seeds
print('we have latents for ' + str(len(latents)) + ' seeds')
sums = np.zeros(512, np.float64)
for seed in latents:
this_seed_latents = latents[seed]
# print(str(this_seed_latents[0]))
for i in range(len(this_seed_latents[0])):
sums[i] += this_seed_latents[0][i]
avgs = [(sums[i] / len(latents)) for i in range(len(sums))]
# print(str(avgs))
f = open(dnnlib.make_run_dir_path('avgs.json'), 'w')
json.dump([avgs], f)
f.close()
# output the averages, and then for each seed the variance from the average for each latent
approx0 = np.zeros(512, dtype=int)
with open(dnnlib.make_run_dir_path('latents-analysis.csv'), 'w', newline='') as csvfile:
wrtr = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)
wrtr.writerow(avgs)
for seed in latents:
this_seed_latents = latents[seed][0]
diffs = [(this_seed_latents[i] - avgs[i]) for i in range(len(this_seed_latents))]
wrtr.writerow(diffs)
for i in range(len(diffs)):
if diffs[i] <= .01:
# if the diff between this seed's latent at this position and the average at
# this position is approximately 0, that's a sign that this position is part of
# what makes this type of image appear - these are what we're trying to find
#print('seed ' + str(seed) + ' has approx 0 at ' + str(i))
approx0[i] += 1
for i in range(len(approx0)):
print(str(i) + ': ' + str(approx0[i]) + ' approximate zeros')
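# Hedged worked example of the analysis above (toy 4-component latents): the
# per-position mean is taken across seeds, and a position is counted as an
# "approximate zero" for a seed when that seed's value minus the mean is
# <= 0.01 (note the original comparison is one-sided, which this mirrors).
def _example_common_latent_count():
    latents = {'a': np.array([0.5, 1.0, -2.0, 0.0]),
               'b': np.array([0.5, 2.0, -2.0, 4.0])}
    avgs = np.mean(np.stack(list(latents.values())), axis=0)
    approx0 = np.zeros(len(avgs), dtype=int)
    for z in latents.values():
        approx0 += (z - avgs <= .01).astype(int)
    return avgs, approx0  # -> [0.5, 1.5, -2.0, 2.0], [2, 1, 2, 1]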
def _parse_num_range(s):
'''Accept either a comma separated list of numbers 'a,b,c' or a range 'a-c' and return as a list of ints.'''
range_re = re.compile(r'^(\d+)-(\d+)$')
m = range_re.match(s)
if m:
return range(int(m.group(1)), int(m.group(2))+1)
vals = s.split(',')
return [int(x) for x in vals]
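# Hedged usage note for the helper above: a dash range expands to an inclusive
# range object, while a comma list is returned as those exact ints.
def _example_parse_num_range():
    return list(_parse_num_range('0-2')), _parse_num_range('0,1,5')  # ([0, 1, 2], [0, 1, 5])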
def _str_to_bool(v):
if isinstance(v, bool):
return v
if v.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif v.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Boolean value expected.')
#----------------------------------------------------------------------------
_examples = '''examples:
# Project generated images
python %(prog)s project-generated-images --network=gdrive:networks/stylegan2-car-config-f.pkl --seeds=0,1,5
# Project real images
python %(prog)s project-real-images --network=gdrive:networks/stylegan2-car-config-f.pkl --dataset=car --data-dir=~/datasets
'''
#----------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(
description='''StyleGAN2 projector.
Run 'python %(prog)s <subcommand> --help' for subcommand help.''',
epilog=_examples,
formatter_class=argparse.RawDescriptionHelpFormatter
)
subparsers = parser.add_subparsers(help='Sub-commands', dest='command')
generate_grid_of_variants_parser = subparsers.add_parser('generate-grid-of-variants', help="Generate a grid of variants of a single generated image")
generate_grid_of_variants_parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
generate_grid_of_variants_parser.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=1.0)
generate_grid_of_variants_parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
generate_grid_of_variants_parser.add_argument('--latents-file', help='File containing a 512-element json array of floats representing the latents of the image to generate variations on (default: %(default)s)', metavar='FILE', required=True)
generate_grid_of_variants_parser.add_argument('--verbose', help='activate verbose mode during run (defaults: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
generate_interpolation_between_parser = subparsers.add_parser('generate-interpolation-between', help="Generate a strip of images interpolation between start and end")
generate_interpolation_between_parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
generate_interpolation_between_parser.add_argument('--truncation-psi', type=float, help='Truncation psi (default: %(default)s)', default=1.0)
generate_interpolation_between_parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
generate_interpolation_between_parser.add_argument('--latents-file-start', help='File containing a 512-element json array of floats representing the latents of the image to start the generation (default: %(default)s)', metavar='FILE', required=True)
generate_interpolation_between_parser.add_argument('--latents-file-end', help='File containing a 512-element json array of floats representing the latents of the image to finish the generation (default: %(default)s)', metavar='FILE', required=True)
generate_interpolation_between_parser.add_argument('--num-steps', help='Number of steps to interpolate between the images', type=int, required=True)
generate_interpolation_between_parser.add_argument('--verbose', help='activate verbose mode during run (defaults: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
get_latents_for_seeds_parser = subparsers.add_parser('get-latents-for-seeds', help='Write out latents for seeds')
get_latents_for_seeds_parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
get_latents_for_seeds_parser.add_argument('--seeds', type=_parse_num_range, help='List of random seeds', default=range(3))
get_latents_for_seeds_parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
get_latents_for_seeds_parser.add_argument('--verbose', help='activate verbose mode during run (defaults: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
find_common_latents_parser = subparsers.add_parser('find-common-latents', help='Write out a csv containing latents for a directory of seeds along with difference between latent and average of that latent for each item in the vector')
find_common_latents_parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
find_common_latents_parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
find_common_latents_parser.add_argument('--input-dir', help='Directory containing generated images to find common latents between (default: %(default)s)', default='latent-inputs', metavar='DIR')
project_real_images_parser = subparsers.add_parser('project-real-images', help='Project real images')
project_real_images_parser.add_argument('--network', help='Network pickle filename', dest='network_pkl', required=True)
project_real_images_parser.add_argument('--data-dir', help='Dataset root directory', required=True)
project_real_images_parser.add_argument('--dataset', help='Training dataset', dest='dataset_name', required=True)
project_real_images_parser.add_argument('--num-snapshots', type=int, help='Number of snapshots (default: %(default)s)', default=5)
project_real_images_parser.add_argument('--num-images', type=int, help='Number of images to project (default: %(default)s)', default=3)
project_real_images_parser.add_argument('--result-dir', help='Root directory for run results (default: %(default)s)', default='results', metavar='DIR')
project_real_images_parser.add_argument('--verbose', help='activate verbose mode during run (defaults: %(default)s)', default=False, metavar='BOOL', type=_str_to_bool)
args = parser.parse_args()
subcmd = args.command
if subcmd is None:
print ('Error: missing subcommand. Re-run with --help for usage.')
sys.exit(1)
kwargs = vars(args)
sc = dnnlib.SubmitConfig()
sc.num_gpus = 1
sc.submit_target = dnnlib.SubmitTarget.LOCAL
sc.local.do_not_copy_source_files = True
sc.run_dir_root = kwargs.pop('result_dir')
sc.run_desc = kwargs.pop('command')
if 'verbose' in kwargs:
sc.verbose = kwargs.pop('verbose')
print('setting verbose mode to ' + str(sc.verbose))
func_name_map = {
# 'generate-grid-of-variants': 'run_latentmod.generate_grid_of_variants',
'generate-grid-of-variants': 'run_latentmod.generate_mutated_grid',
'generate-interpolation-between': 'run_latentmod.generate_interpolation_between',
'get-latents-for-seeds': 'run_latentmod.get_latents_for_seeds',
'find-common-latents': 'run_latentmod.find_common_latents'
}
dnnlib.submit_run(sc, func_name_map[subcmd], **kwargs)
#----------------------------------------------------------------------------
if __name__ == "__main__":
main()
#----------------------------------------------------------------------------
| {"hexsha": "a518a6e7fc0aec0f8348b8b1cd1edf8b234e9405", "size": 19430, "ext": "py", "lang": "Python", "max_stars_repo_path": "run_latentmod.py", "max_stars_repo_name": "colrich/stylegan2", "max_stars_repo_head_hexsha": "809ba38c67d4c395c3ab024fbff5909a607dc973", "max_stars_repo_licenses": ["BSD-Source-Code"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "run_latentmod.py", "max_issues_repo_name": "colrich/stylegan2", "max_issues_repo_head_hexsha": "809ba38c67d4c395c3ab024fbff5909a607dc973", "max_issues_repo_licenses": ["BSD-Source-Code"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "run_latentmod.py", "max_forks_repo_name": "colrich/stylegan2", "max_forks_repo_head_hexsha": "809ba38c67d4c395c3ab024fbff5909a607dc973", "max_forks_repo_licenses": ["BSD-Source-Code"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-08T16:49:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-08T16:49:40.000Z", "avg_line_length": 49.5663265306, "max_line_length": 250, "alphanum_fraction": 0.6780751415, "include": true, "reason": "import numpy", "num_tokens": 4802} |
import datetime as dt
import numpy as np
from hypothesis import given
from hypothesis.strategies import floats
import pytest
from orbit_predictor.utils import (
angle_between,
get_sun,
get_shadow,
eclipse_duration,
get_satellite_minus_penumbra_verticals,
)
# Data obtained from Astropy using the JPL ephemerides
# coords = get_body("sun",
# Time(when_utc)).represent_as(CartesianRepresentation).xyz.to("au").T.value
@pytest.mark.parametrize("when_utc,expected_eci", [
[dt.datetime(2000, 1, 1, 12), np.array([0.17705013, -0.88744275, -0.38474906])],
[dt.datetime(2009, 6, 1, 18, 30), np.array([0.32589889, 0.88109849, 0.38197646])],
[dt.datetime(2019, 11, 25, 18, 46, 0), np.array([-0.449363, -0.80638653, -0.34956405])],
[dt.datetime(2025, 12, 1, 12), np.array([-0.35042293, -0.84565374, -0.36657211])],
])
def test_get_sun_matches_expected_result_within_precision(when_utc, expected_eci):
eci = get_sun(when_utc)
assert angle_between(eci, expected_eci) < 1.0 # Claimed precision
assert angle_between(eci, expected_eci) < 0.5 # Actual precision
# Data obtained from GMAT
@pytest.mark.parametrize("when_utc,r_ecef,expected_shadow", [
[dt.datetime(2000, 1, 1, 12, 9, 0), np.array([1272.929355, 6984.992047, 1299.821897]), 2],
[dt.datetime(2000, 1, 1, 12, 30, 0), np.array([-7298.548961, 500.322464, 639.443822]), 0],
])
def test_get_shadow_matches_expected_result(when_utc, r_ecef, expected_shadow):
shadow = get_shadow(r_ecef, when_utc)
assert shadow == expected_shadow
# Data obtained from GMAT
# Testing the penumbra is much harder, because it only lasts a few seconds
# and the uncertainty in the Sun position is even larger than the angle difference
# between umbra and penumbra
@pytest.mark.xfail
@pytest.mark.parametrize("when_utc,r_ecef", [
[dt.datetime(2000, 1, 1, 12, 10, 5), np.array([-2779.471958, 6565.365892, 1625.185914])],
[dt.datetime(2000, 1, 1, 12, 10, 15), np.array([-2842.327184, 6539.439097, 1625.522584])],
])
def test_get_shadow_gives_penumbra(when_utc, r_ecef):
shadow = get_shadow(r_ecef, when_utc)
assert shadow == 1
@pytest.mark.parametrize("beta", [-90, 90])
@given(period=floats(90, 60 * 24))
def test_eclipse_duration_beta_90_is_0(beta, period):
expected_eclipse_duration = 0
eclipse_duration_value = eclipse_duration(beta, period)
assert eclipse_duration_value == expected_eclipse_duration
@given(
beta=floats(-90, 90),
period=floats(0, 60 * 24, width=16, exclude_min=True),
)
def test_eclipse_duration_dwarf_planet_always_0(beta, period):
expected_eclipse_duration = 0
eclipse_duration_value = eclipse_duration(beta, period, r_p=0)
assert eclipse_duration_value == expected_eclipse_duration
@given(
beta=floats(-90, 90).filter(lambda f: f > 1e-1),
period=floats(90, 60 * 24),
)
def test_eclipse_duration_is_maximum_at_beta_0(beta, period):
ref_eclipse_duration = eclipse_duration(0, period)
assert beta != 0
assert eclipse_duration(beta, period) < ref_eclipse_duration
# Examples taken from the predictors in test_predictors, validated with shadow function
@pytest.mark.parametrize("when_utc,r_ecef", [
[dt.datetime(2021, 9, 4, 1, 21, 15), np.array((1307.930, -258.467, -6727.760))], # illum
[dt.datetime(2021, 9, 4, 1, 25, 15), np.array((2312.642, -1713.363, -6224.066))], # eclipse
[dt.datetime(2021, 9, 4, 1, 53, 19), np.array((2104.446, -4747.296, 4476.039))], # eclipse
[dt.datetime(2021, 9, 4, 1, 57, 19), np.array((1216.010, -3660.917, 5667.907))], # illum
])
def test_satellite_minus_penumbra_consistent_with_discrete_witness_cases(when_utc, r_ecef):
if get_shadow(r_ecef, when_utc) == 2:
assert get_satellite_minus_penumbra_verticals(r_ecef, when_utc) > 0
else:
assert get_satellite_minus_penumbra_verticals(r_ecef, when_utc) < 0
@pytest.mark.parametrize("when_utc,r_ecef", [
[dt.datetime(2000, 1, 1, 12, 10, 5), np.array([-2779.471958, 6565.365892, 1625.185914])],
[dt.datetime(2000, 1, 1, 12, 10, 15), np.array([-2842.327184, 6539.439097, 1625.522584])],
])
def test_satellite_minus_penumbra_is_positive_in_illumination(when_utc, r_ecef):
assert get_satellite_minus_penumbra_verticals(r_ecef, when_utc) > 0
| {"hexsha": "fc71b3b80c1b2f2b9ebf92b41de2cf90f2645153", "size": 4281, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_sun.py", "max_stars_repo_name": "Juanlu001/orbit-predictor", "max_stars_repo_head_hexsha": "ca67e2e859932938627ed24e5cbf58c887cd99c0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_sun.py", "max_issues_repo_name": "Juanlu001/orbit-predictor", "max_issues_repo_head_hexsha": "ca67e2e859932938627ed24e5cbf58c887cd99c0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_sun.py", "max_forks_repo_name": "Juanlu001/orbit-predictor", "max_forks_repo_head_hexsha": "ca67e2e859932938627ed24e5cbf58c887cd99c0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9181818182, "max_line_length": 96, "alphanum_fraction": 0.7161878066, "include": true, "reason": "import numpy", "num_tokens": 1415} |
import h2o
import numpy as np
import os
import pandas as pd
from h2o.estimators.random_forest import H2ORandomForestEstimator
from h2o.estimators.xgboost import H2OXGBoostEstimator
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from h2o.estimators.gbm import H2OGradientBoostingEstimator
schema = {'cat_age': 'enum',
'cat_area': 'enum',
'cat_assured': 'enum',
'cat_cancel': 'enum',
#'cat_distr': 'enum',
'cat_marriage': 'enum',
'cat_sex': 'enum',
'cat_vc': 'enum',
'cat_vmm1': 'enum',
'cat_vmm2': 'enum',
'cat_vmy': 'enum',
'cat_vqpt': 'enum',
'cat_vregion': 'enum',
'cat_zip': 'enum',
'int_acc_lia': 'int',
'int_claim_plc': 'int',
'int_others': 'int',
'real_acc_dmg': 'real',
'real_acc_lia': 'real',
'real_loss_plc': 'real',
'real_prem_dmg': 'real',
'real_prem_ins': 'real',
'real_prem_lia': 'real',
'real_prem_plc': 'real',
'real_prem_thf': 'real',
'real_prem_vc': 'real',
'real_vcost': 'real',
'real_ved': 'real'}
######## get model input func ########
def get_train_input(train_only=False, ext='bs', seed=0):
'''
In:
bool(train_only),
str(ext),
int(seed)
Out:
DataFrame(X_train),
DataFrame(X_test),
DataFrame(y_train),
DataFrame(y_test),
Description:
if train_only, then split train data into 80/20
else read in train and test data
'''
if train_only:
np.random.seed(seed)
X_all = read_interim_data('X_train_{}.csv'.format(ext))
y_all = read_interim_data('y_train_{}.csv'.format(ext))
msk = np.random.rand(len(X_all)) < 0.8
X_train = X_all[msk]
y_train = y_all[msk]
X_test = X_all[~msk]
y_test = y_all[~msk]
else:
X_train = read_interim_data('X_train_{}.csv'.format(ext))
X_test = read_interim_data('X_test_{}.csv'.format(ext))
y_train = read_interim_data('y_train_{}.csv'.format(ext))
y_test = read_raw_data('testing-set.csv')
cols = ['real_prem_plc',
'real_prem_lia',
'cat_distr',
'int_acc_lia',
'cat_zip',
'cat_sex',
'real_acc_dmg',]
X_train = X_train[cols]
X_test = X_test[cols]
return(X_train, X_test, y_train, y_test)
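# Hedged illustration of the 80/20 split used above when train_only is True:
# a uniform random vector thresholded at 0.8 yields a boolean mask whose True
# rows become the training portion and whose complement becomes the hold-out.
def _example_random_split(n_rows=10, seed=0):
    np.random.seed(seed)
    msk = np.random.rand(n_rows) < 0.8
    return int(msk.sum()), int((~msk).sum())  # roughly an 8/2 split on average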
######## train model func ########
def train_h2o_model(X_train, X_test, y_train, model, params):
'''
In:
DataFrame(X_train),
DataFrame(X_test),
DataFrame(y_train)
Out:
dict(output) -> includes model, fit_test, fit_train
Description:
train the given h2o estimator (random forest, xgboost, glm or gbm)
'''
schema = dict()
for col in X_train.columns:
if col.startswith('cat'):
schema[col] = 'enum'
elif col.startswith('int'):
schema[col] = 'int'
else:
schema[col] = 'real'
# transform to h2o format
df_train = y_train.merge(X_train, how='left', left_index=True, right_index=True)
h2o_train = h2o.H2OFrame(df_train, column_types=schema)
# split train into train and valid
train, valid = h2o_train.split_frame(ratios = [0.8], seed=0)
# separate independent variables from dependent variables
col_y = 'Next_Premium'
col_X = list(X_train.columns)
# create random forest model
rf_v1 = cv_h2o(col_X, col_y, train, valid, model, params)
# fit model to train and test data
output = {'model': rf_v1,
'fit_train': get_fit_data(rf_v1, X_train, schema),
'fit_test': get_fit_data(rf_v1, X_test, schema)
}
return(output)
######## get cross validation func ########
def cv_h2o(col_X, col_y, train, valid, model, params):
'''
In:
list(col_X),
str(col_y),
DataFrame(train),
DataFrame(valid),
list(params),
Out:
Any(rf) -> model with lowest validation MAE
Description:
grid-search over the given parameter lists and return the fitted model with the lowest validation MAE
'''
params = [dict(zip(params,t)) for t in zip(*params.values())]
rf_list = []
mae_list = []
for p in params:
#H2ORandomForestEstimator
rf = model(**p)
rf.train(col_X, col_y, training_frame=train, validation_frame=valid)
mae = rf.mae(valid=True)
mae_list.append(mae)
rf_list.append(rf)
print(mae)
mae_min, idx = min((val, idx) for (idx, val) in enumerate(mae_list))
return rf_list[idx]
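# Hedged sketch of the parameter-grid expansion used inside cv_h2o: a dict of
# equal-length lists is zipped column-wise into one kwargs dict per candidate
# model, so index i of every list describes the same candidate.
def _example_param_grid_expansion():
    params = {'ntrees': [50, 100], 'max_depth': [10, 20]}
    return [dict(zip(params, t)) for t in zip(*params.values())]
    # -> [{'ntrees': 50, 'max_depth': 10}, {'ntrees': 100, 'max_depth': 20}]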
######## get model prediction func ########
def get_fit_data(model, X, schema):
'''
In:
Any(model),
DataFrame(X),
dict(schema),
Out:
DataFrame(fit)
Description:
fit model and generate submission df
'''
h2o_X = h2o.H2OFrame(X, column_types=schema)
fit = model.predict(h2o_X).as_data_frame()
fit = fit.assign(Policy_Number = X.index)
fit = fit.set_index(['Policy_Number'])
fit.columns = ['Next_Premium']
return(fit)
######## get model summary func ########
def get_analysis_on_model(model, X, y, fit):
'''
In:
DataFrame(X),
DataFrame(y),
DataFrame(fit),
Out:
dict(summary)
Description:
analyze model output
'''
# mae
mae = (y['Next_Premium'] - fit['Next_Premium']).abs().mean()
varimp = pd.DataFrame(model.varimp())
scoring_history = pd.DataFrame(model.scoring_history())
output = {'mae': mae,
'varimp': varimp,
'scoring_history': scoring_history,
}
return(output)
######## read/write func ########
def read_raw_data(file_name, index_col='Policy_Number'):
'''
In: file_name
Out: raw_data
Description: read data from directory /data/raw
'''
# set the path of raw data
raw_data_path = os.path.join(os.getcwd(), os.path.pardir, os.path.pardir, 'data', 'raw')
file_path = os.path.join(raw_data_path, file_name)
raw_data = pd.read_csv(file_path, index_col=index_col)
return(raw_data)
def read_interim_data(file_name, index_col='Policy_Number'):
'''
In: file_name
Out: interim_data
Description: read data from directory /data/interim
'''
# set the path of raw data
interim_data_path = os.path.join(os.getcwd(), os.path.pardir, os.path.pardir, 'data', 'interim')
file_path = os.path.join(interim_data_path, file_name)
interim_data = pd.read_csv(file_path, index_col=index_col)
return(interim_data)
def write_precessed_data(df):
'''
In:
DataFrame(df),
str(file_name),
Out:
None
Description:
Write processed submission data to directory /data/processed
'''
precessed_data_path = os.path.join(os.getcwd(), os.path.pardir, os.path.pardir, 'data', 'processed')
write_sample_path = os.path.join(precessed_data_path, 'testing-set.csv')
df.to_csv(write_sample_path)
return(None)
if __name__ == '__main__':
# ### Start H2O
# Start up a 1-node H2O cloud on your local machine, and allow it to use all CPU cores and up to 2GB of memory:
h2o.init(max_mem_size = "2G") #specify max number of bytes. uses all cores by default.
h2o.remove_all() #clean slate, in case cluster was already running
X_train, X_test, y_train, y_test = get_train_input(train_only=False, ext='fs')
# define model and parameters
rf_params = {
'ntrees': [50],
'max_depth':[20],
'stopping_metric': ['mae'],
'stopping_rounds': [2],
'score_each_iteration': [True],
'col_sample_rate_per_tree': [1],
#'sample_rate': [0.4, 0.6, 0.8],
'seed': [1000000]
}
output_rf = train_h2o_model(X_train, X_test, y_train, H2ORandomForestEstimator, rf_params)
perf_rf_train = get_analysis_on_model(output_rf['model'], X_train, y_train, output_rf['fit_train'])
perf_rf_test = get_analysis_on_model(output_rf['model'], X_test, y_test, output_rf['fit_test'])
#write_precessed_data(output_rf['fit_test'])
xg_params = {
'ntrees': [300],
#'max_depth':[15] * 3,
'learn_rate': [0.1],
'stopping_metric': ['mae'],
'stopping_rounds': [2],
'score_each_iteration': [True],
#'col_sample_rate_per_tree': [0.6, 0.8, 1],
#'sample_rate': [0.6, 0.8, 1],
'seed': [1000000]
}
output_xg = train_h2o_model(X_train, X_test, y_train, H2OXGBoostEstimator, xg_params)
perf_xg_train = get_analysis_on_model(output_xg['model'], X_train, y_train, output_xg['fit_train'])
perf_xg_test = get_analysis_on_model(output_xg['model'], X_test, y_test, output_xg['fit_test'])
ln_params = {
'lambda_search': [True],
'seed': [1000000]
}
output_ln = train_h2o_model(X_train, X_test, y_train, H2OGeneralizedLinearEstimator, ln_params)
perf_ln_train = get_analysis_on_model(output_ln['model'], X_train, y_train, output_ln['fit_train'])
perf_ln_test = get_analysis_on_model(output_ln['model'], X_test, y_test, output_ln['fit_test'])
gb_params = {
'learn_rate': [0.1, 0.2, 0.3],
'stopping_metric': ['mae'] * 3,
'stopping_rounds': [2] * 3,
'score_each_iteration': [True] * 3,
#'col_sample_rate_per_tree': [0.6, 0.8, 1],
#'sample_rate': [0.6, 0.8, 1],
'seed': [1000000] * 3
}
output_gb = train_h2o_model(X_train, X_test, y_train, H2OGradientBoostingEstimator, gb_params)
perf_gb_train = get_analysis_on_model(output_gb['model'], X_train, y_train, output_gb['fit_train'])
perf_gb_test = get_analysis_on_model(output_gb['model'], X_test, y_test, output_gb['fit_test'])
h2o.shutdown(prompt=False)
| {"hexsha": "f3db39d72b70555629776fd0d4ce93648c9126f5", "size": 9493, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/models/h2o_rf_train.py", "max_stars_repo_name": "zh272/AIGOGO", "max_stars_repo_head_hexsha": "0255cf8c4776358b73ee6b1792325a151a0cfa78", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/models/h2o_rf_train.py", "max_issues_repo_name": "zh272/AIGOGO", "max_issues_repo_head_hexsha": "0255cf8c4776358b73ee6b1792325a151a0cfa78", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/models/h2o_rf_train.py", "max_forks_repo_name": "zh272/AIGOGO", "max_forks_repo_head_hexsha": "0255cf8c4776358b73ee6b1792325a151a0cfa78", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2092307692, "max_line_length": 115, "alphanum_fraction": 0.6239334246, "include": true, "reason": "import numpy", "num_tokens": 2656}
# Loss function for XYZ maps
# https://arxiv.org/pdf/2109.07577.pdf
import torch
import copy
import torchvision.models as models
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import Visuallization as vis
######################################################################################################################
class Loss(nn.Module):
########################################################################################################################
def __init__(self): # Create class for Loss function for XYZ maps
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # choose gpu or cpu
super(Loss, self).__init__()
self.DifLayers={}
self.ROILayers={}
# type=np.float32
##Dif filters are used to find the distance between two points on the XYZ map
# ROI filters are used on the ROI map to check that both points are within the ROI (otherwise the distance is invalid)
self.DifLayers['Horizontal']=torch.from_numpy(np.array( [[0,0,0] , [0,1,-1], [0,0,0]],dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
self.ROILayers['Horizontal'] = torch.from_numpy(np.array([[0,0,0] , [0,0,1], [0,0,0]],dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
self.DifLayers['Vertical'] = torch.from_numpy(np.array( [[0,0,0] , [0,1,0], [0,-1,0]],dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
self.ROILayers['Vertical']= torch.from_numpy(np.array( [[0,0,0] , [0,0,0], [0,1,0]],dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
self.DifLayers['Diagonal']= torch.from_numpy(np.array( [[0,0,0] , [0,1,0], [0,0,-1]], dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
self.ROILayers['Diagonal'] = torch.from_numpy(np.array( [[0,0,0] , [0,0,0], [0,0,1]], dtype=np.float32)).to(device).unsqueeze(0).unsqueeze(0)
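# Illustration (derived from the kernels above): with padding=scale and dilation=scale,
# F.conv2d(X, DifLayers['Horizontal']) yields X[y, x] - X[y, x + scale], the signed difference between a pixel
# and the pixel `scale` columns to its right, while the matching ROI kernel picks out ROIMask[y, x + scale];
# multiplying that by ROIMask afterwards therefore keeps a pair only when both endpoints lie inside the ROI.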
#######################################################################################################################################################
def DiffrentialLoss(self, PredXYZ, GTXYZ,ROIMask, ConstNP=[]):
# Calculate L1 loss using distances between pairs of points in the XYZ maps (where the predicted XYZ map is scaled to match the GT map)
# If not given in ConstNP, the relative scale is also calculated.
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu') # choose gpu or cpu
ReCalculateNormalizationConst = (len( ConstNP) == 0) # ConstNP is the relative scale between the predicted and GT XYZ model. This can either be given or be calculated within the function.
# print(self.DifLayers['Horizontal'])
MaxScale=np.max(ROIMask.shape[2:])# Max scale of the dilated convolution layer (max distance between pixels in the image that will be used to calculate distances between points)
MaxScale=np.min([200,MaxScale])
MinScale=1
step=3
NumLayers=int(np.ceil((MaxScale-MinScale)/step)*3*len(list(self.DifLayers))) # distances between points will be calculated on the X, Y and Z axes separately using dilated convolutions with a [1,-1] structure
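# e.g. (illustrative) for a 480x640 input: MaxScale = min(200, 640) = 200, giving ceil(199/3) = 67 scales and NumLayers = 67*3*3 = 603 difference maps per sample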
difPrd=torch.autograd.Variable(torch.zeros(PredXYZ.shape[0],NumLayers,PredXYZ.shape[2],PredXYZ.shape[3]).to(device),requires_grad=False) # Will contain, for the predicted map, the distances between pairs of points at different separations along the X,Y,Z axes
difGT=torch.autograd.Variable(torch.zeros(PredXYZ.shape[0],NumLayers,PredXYZ.shape[2],PredXYZ.shape[3]).to(device),requires_grad=False) # Will contain, for the GT map, the distances between pairs of points at different separations along the X,Y,Z axes
i=-1
for scale in range(1,MaxScale,3): # Go over all scales (distance between pixels)
for l in range(3): # Go over X,Y,Z axis
for nm in self.DifLayers: # Go over filters (relative location of points pairs, horizontal/vertical/diagonal)
i+=1
ROI = ROIMask * F.conv2d(ROIMask, self.ROILayers[nm], bias=None, stride=1, padding=scale,dilation=scale) # Check that both points are within the ROI
difPrd[:,i:i+1] = ROI*F.conv2d(PredXYZ[:,l:l+1,:,:], self.DifLayers[nm], bias=None, stride=1, padding=scale, dilation=scale)# Find distance between two points on the predicted XYZ model
difGT[:,i:i+1] = ROI*F.conv2d(GTXYZ[:,l:l+1,:,:], self.DifLayers[nm], bias=None, stride=1, padding=scale, dilation=scale) # Find distance between two points on the GT XYZ model
# print("i=",i,"NumLayers=",NumLayers)
##============================Calculate relative scale between predicted and GT maps
# ---------This part should NOT transmit gradient-----------------------------
if ReCalculateNormalizationConst: # If the normalization scale constants are not given in advance, calculate them
Rat = (difPrd/(difGT+0.00001)) # Ratios between predicted and ground-truth distances between points
Rat = F.relu(Rat) # Only positive ratios can be used when calculating the loss
#---------- The constant is a weighted mean of the ratios in which pairs with larger GT differences contribute more. It basically reduces to the absolute sum of predicted distances divided by the absolute sum of GT distances-------------------------
# NormConst is the relative scale between GT and predicted XYZ (one number per image)
NormConst=(torch.abs(difGT)*Rat).sum((1,2,3))/((torch.abs(difGT)*(Rat>0)).sum((1,2,3))+0.0001) # Weighted average of Rat where the weights are the absolute GT differences
ConstNP=NormConst.data.cpu().numpy() # Convert to numpy to block the gradient (yes, there are more efficient ways, but it is one number per image so it takes little time)
# print("ConstNP=",ConstNP)
#----------------This part should transmit gradient-------------------------
Loss=0
for i in range(len(ConstNP)):
#=====================Loss is the absolute difference between predicted and GT XYZ maps, where the prediction is scaled by the scale constant
# print("ScaleDif",ScaleDif)
if ConstNP[i]>0.0001: # If the scale constant is too small, ignore (skip this image)
Loss+=torch.abs(difGT[i]-difPrd[i]/ConstNP[i]).mean() # Calculate loss
#----------------make sure predictions will not be too large or too small: basically penalize too-small or too-large scale constants ----------------------------------------------
if ReCalculateNormalizationConst: # Check that the constants are not too large or too small
ROISum = ROIMask[i].sum()
if ROISum>200: # Make sure the ROI is not too small to give reliable statistics
MeanPrdDif = torch.abs(difPrd[i]).sum()/((difGT[i]>0).sum()) # The mean absolute distance between points; the (difGT[i]>0) term is simply the number of valid distances
if MeanPrdDif>30 and ConstNP[i]>10: # Penalize the relative scale if it is too large
Loss+=(MeanPrdDif-30)
if MeanPrdDif<2 and ConstNP[i]<0.1: # Penalize the relative scale if it is too small
Fact = 0.1 / (ConstNP[i] + 0.001)
# print("MeanPrdDif",MeanPrdDif)
Loss += (0.2-MeanPrdDif)*Fact
# Loss/=ROIMask.shape[0]
return Loss,ConstNP # return loss and normalization scale constant
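# Minimal usage sketch (tensor shapes are assumptions inferred from the code above, not part of the original file):
# loss_fn = Loss()
# PredXYZ, GTXYZ: float tensors of shape [B, 3, H, W]; ROIMask: float tensor of shape [B, 1, H, W] with 0/1 entries
# loss, scale = loss_fn.DiffrentialLoss(PredXYZ, GTXYZ, ROIMask) # first call also estimates the per-image scale
# loss2, _ = loss_fn.DiffrentialLoss(PredXYZ2, GTXYZ, ROIMask, scale) # later calls can reuse the estimated scale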
########################Find the difference between the vessel and content mask using known normalization constants=========================================
| {"hexsha": "1bdf0e1621585b9217ffa99e5aa23b4c7ecbb2b4", "size": 7512, "ext": "py", "lang": "Python", "max_stars_repo_path": "LossFunctions.py", "max_stars_repo_name": "sagieppel/Predict-3D-model-from-image-as-an-XYZ-map", "max_stars_repo_head_hexsha": "90a81bda59b2ab32afff0e5514e8fd5ae4b1a8cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-23T22:16:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T22:16:34.000Z", "max_issues_repo_path": "LossFunctions.py", "max_issues_repo_name": "sagieppel/Predict-3D-model-from-image-as-an-XYZ-map", "max_issues_repo_head_hexsha": "90a81bda59b2ab32afff0e5514e8fd5ae4b1a8cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "LossFunctions.py", "max_forks_repo_name": "sagieppel/Predict-3D-model-from-image-as-an-XYZ-map", "max_forks_repo_head_hexsha": "90a81bda59b2ab32afff0e5514e8fd5ae4b1a8cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 82.5494505495, "max_line_length": 246, "alphanum_fraction": 0.6098242812, "include": true, "reason": "import numpy", "num_tokens": 1807} |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import numpy as np
import decimal
class CompatEncoder(json.JSONEncoder):
""" Compatible encoder that supports numpy types and Decimal type
json.dumps(data, cls=CompatEncoder)
"""
def default(self, obj):
if isinstance(obj, np.integer):
return int(obj)
elif isinstance(obj, np.floating):
return float(obj)
elif isinstance(obj, np.ndarray):
return obj.tolist()
elif isinstance(obj, decimal.Decimal):
if obj % 1 > 0:
return float(obj)
else:
return int(obj)
else:
return super(CompatEncoder, self).default(obj)
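# Example (illustrative): mixed numpy / Decimal values serialize transparently
# json.dumps({'count': np.int64(3), 'price': decimal.Decimal('9.99')}, cls=CompatEncoder)
# -> '{"count": 3, "price": 9.99}'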
| {"hexsha": "417c8483dbff8c114ceea637b782704e81f82fd7", "size": 783, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/recommendations/src/recommendations-service/experimentation/utils.py", "max_stars_repo_name": "honey-sangtani-c5i/retail-demo-store", "max_stars_repo_head_hexsha": "c76e03b2a1750d9ec16f2dd8c952b8c4c8a53ef8", "max_stars_repo_licenses": ["MIT-0"], "max_stars_count": 404, "max_stars_repo_stars_event_min_datetime": "2020-04-10T01:54:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T08:22:23.000Z", "max_issues_repo_path": "src/recommendations/src/recommendations-service/experimentation/utils.py", "max_issues_repo_name": "honey-sangtani-c5i/retail-demo-store", "max_issues_repo_head_hexsha": "c76e03b2a1750d9ec16f2dd8c952b8c4c8a53ef8", "max_issues_repo_licenses": ["MIT-0"], "max_issues_count": 144, "max_issues_repo_issues_event_min_datetime": "2020-04-14T11:53:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T07:24:44.000Z", "max_forks_repo_path": "src/recommendations/src/recommendations-service/experimentation/utils.py", "max_forks_repo_name": "honey-sangtani-c5i/retail-demo-store", "max_forks_repo_head_hexsha": "c76e03b2a1750d9ec16f2dd8c952b8c4c8a53ef8", "max_forks_repo_licenses": ["MIT-0"], "max_forks_count": 301, "max_forks_repo_forks_event_min_datetime": "2020-04-13T16:41:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T23:51:16.000Z", "avg_line_length": 29.0, "max_line_length": 69, "alphanum_fraction": 0.6079182631, "include": true, "reason": "import numpy", "num_tokens": 160} |
#!/usr/bin/env python
"""
Show how to connect to keypress events
"""
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
class Modulator(object):
count = 0
@classmethod
def iterate_count(cls):
cls.count += 0.1
return cls.count
def my_event(event,methd):
print('press', event.key)
sys.stdout.flush()
xpos = methd()
ax.text(xpos,0.5,event.key,
verticalalignment='bottom',horizontalalignment='right',
transform=ax.transAxes,
color='green', fontsize=25)
fig.canvas.draw()
fig, ax = plt.subplots()
fig.canvas.mpl_connect('key_press_event', lambda event: my_event(event,Modulator.iterate_count))
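# The lambda above forwards each key_press_event to my_event together with Modulator.iterate_count,
# so every key press draws its key name a further 0.1 to the right in axes coordinates.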
ax.plot(np.random.rand(12), np.random.rand(12), 'go')
xl = ax.set_xlabel('easy come, easy go')
plt.show()
| {"hexsha": "38f7154a8e8086374dbd5fb7249aa6e54c762fc3", "size": 860, "ext": "py", "lang": "Python", "max_stars_repo_path": "data-vis-mpl/scripts/keypress_demo.py", "max_stars_repo_name": "chyld/demoX", "max_stars_repo_head_hexsha": "27f26a553aeb6682173f6b1b8dc8969101993324", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "data-vis-mpl/scripts/keypress_demo.py", "max_issues_repo_name": "chyld/demoX", "max_issues_repo_head_hexsha": "27f26a553aeb6682173f6b1b8dc8969101993324", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "data-vis-mpl/scripts/keypress_demo.py", "max_forks_repo_name": "chyld/demoX", "max_forks_repo_head_hexsha": "27f26a553aeb6682173f6b1b8dc8969101993324", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2432432432, "max_line_length": 96, "alphanum_fraction": 0.6534883721, "include": true, "reason": "import numpy", "num_tokens": 207} |
import os
import sys
sys.path.append('.')
sys.path.append('/home/wamsterd/git/pretrainedresnet3d')
import json
import numpy as np
import torch
from torch import nn
from torch import optim
from torch.optim import lr_scheduler
from opts import parse_opts
from model3d import generate_model
from mean import get_mean, get_std
def get_3d_model(num_feature2d_slices=30):
opt = parse_opts()
if opt.root_path != '':
opt.video_path = os.path.join(opt.root_path, opt.video_path)
opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
opt.result_path = os.path.join(opt.root_path, opt.result_path)
if opt.resume_path:
opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
opt.sample_duration=num_feature2d_slices
# print(opt)
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
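# generate_model returns (model, parameters); the trainable-parameter groups are not needed here, only the network itself is returned for export.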
return model
if __name__ == '__main__':
get_3d_model()
| {"hexsha": "b8f862009f2a43a6e043f0ff25f26db8e374f1a4", "size": 1472, "ext": "py", "lang": "Python", "max_stars_repo_path": "exportmodel.py", "max_stars_repo_name": "vanAmsterdam/pretrainedresnet3d", "max_stars_repo_head_hexsha": "48797ef9d021f51f82e5eacbf72ac33ca09ced4d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "exportmodel.py", "max_issues_repo_name": "vanAmsterdam/pretrainedresnet3d", "max_issues_repo_head_hexsha": "48797ef9d021f51f82e5eacbf72ac33ca09ced4d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "exportmodel.py", "max_forks_repo_name": "vanAmsterdam/pretrainedresnet3d", "max_forks_repo_head_hexsha": "48797ef9d021f51f82e5eacbf72ac33ca09ced4d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7111111111, "max_line_length": 78, "alphanum_fraction": 0.7085597826, "include": true, "reason": "import numpy", "num_tokens": 361} |
Describe Users/alyssasusan here.
eggs
| {"hexsha": "e392a4c580358ed368632a5d0fd569ca530046cc", "size": 38, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/alyssasusan.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/alyssasusan.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/alyssasusan.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 12.6666666667, "max_line_length": 32, "alphanum_fraction": 0.8421052632, "num_tokens": 12} |
[STATEMENT]
lemma ta_seq_consist_imp_sequentially_consistent:
assumes tsa_ok: "thread_start_actions_ok E"
and new_actions_for_fun: "\<And>adal a a'. \<lbrakk> a \<in> new_actions_for P E adal; a' \<in> new_actions_for P E adal \<rbrakk> \<Longrightarrow> a = a'"
and seq: "ta_seq_consist P Map.empty (lmap snd E)"
shows "\<exists>ws. sequentially_consistent P (E, ws) \<and> P \<turnstile> (E, ws) \<surd>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<exists>ws. sequentially_consistent P (E, ws) \<and> P \<turnstile> (E, ws) \<surd>
[PROOF STEP]
proof(intro exI conjI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. sequentially_consistent P (E, ?ws)
2. P \<turnstile> (E, ?ws) \<surd>
[PROOF STEP]
define ws where "ws i = (THE w. P,E \<turnstile> i \<leadsto>mrw w)" for i
[PROOF STATE]
proof (state)
this:
ws ?i = (THE w. P,E \<turnstile> ?i \<leadsto>mrw w)
goal (2 subgoals):
1. sequentially_consistent P (E, ?ws)
2. P \<turnstile> (E, ?ws) \<surd>
[PROOF STEP]
from seq
[PROOF STATE]
proof (chain)
picking this:
ta_seq_consist P Map.empty (lmap snd E)
[PROOF STEP]
have ns: "non_speculative P (\<lambda>_. {}) (lmap snd E)"
[PROOF STATE]
proof (prove)
using this:
ta_seq_consist P Map.empty (lmap snd E)
goal (1 subgoal):
1. non_speculative P (\<lambda>_. {}) (lmap snd E)
[PROOF STEP]
by(rule ta_seq_consist_into_non_speculative) simp
[PROOF STATE]
proof (state)
this:
non_speculative P (\<lambda>_. {}) (lmap snd E)
goal (2 subgoals):
1. sequentially_consistent P (E, ?ws)
2. P \<turnstile> (E, ?ws) \<surd>
[PROOF STEP]
show "sequentially_consistent P (E, ws)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sequentially_consistent P (E, ws)
[PROOF STEP]
unfolding ws_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sequentially_consistent P (E, \<lambda>i. The (most_recent_write_for P E i))
[PROOF STEP]
proof(rule sequentially_consistentI)
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r. r \<in> read_actions E \<Longrightarrow> P,E \<turnstile> r \<leadsto>mrw The (most_recent_write_for P E r)
[PROOF STEP]
fix r
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>r. r \<in> read_actions E \<Longrightarrow> P,E \<turnstile> r \<leadsto>mrw The (most_recent_write_for P E r)
[PROOF STEP]
assume "r \<in> read_actions E"
[PROOF STATE]
proof (state)
this:
r \<in> read_actions E
goal (1 subgoal):
1. \<And>r. r \<in> read_actions E \<Longrightarrow> P,E \<turnstile> r \<leadsto>mrw The (most_recent_write_for P E r)
[PROOF STEP]
with seq new_actions_for_fun
[PROOF STATE]
proof (chain)
picking this:
ta_seq_consist P Map.empty (lmap snd E)
\<lbrakk>?a \<in> new_actions_for P E ?adal; ?a' \<in> new_actions_for P E ?adal\<rbrakk> \<Longrightarrow> ?a = ?a'
r \<in> read_actions E
[PROOF STEP]
obtain w where "P,E \<turnstile> r \<leadsto>mrw w"
[PROOF STATE]
proof (prove)
using this:
ta_seq_consist P Map.empty (lmap snd E)
\<lbrakk>?a \<in> new_actions_for P E ?adal; ?a' \<in> new_actions_for P E ?adal\<rbrakk> \<Longrightarrow> ?a = ?a'
r \<in> read_actions E
goal (1 subgoal):
1. (\<And>w. P,E \<turnstile> r \<leadsto>mrw w \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(auto dest: ta_seq_consist_most_recent_write_for)
[PROOF STATE]
proof (state)
this:
P,E \<turnstile> r \<leadsto>mrw w
goal (1 subgoal):
1. \<And>r. r \<in> read_actions E \<Longrightarrow> P,E \<turnstile> r \<leadsto>mrw The (most_recent_write_for P E r)
[PROOF STEP]
thus "P,E \<turnstile> r \<leadsto>mrw THE w. P,E \<turnstile> r \<leadsto>mrw w"
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> r \<leadsto>mrw w
goal (1 subgoal):
1. P,E \<turnstile> r \<leadsto>mrw THE w. P,E \<turnstile> r \<leadsto>mrw w
[PROOF STEP]
by(simp add: THE_most_recent_writeI)
[PROOF STATE]
proof (state)
this:
P,E \<turnstile> r \<leadsto>mrw THE w. P,E \<turnstile> r \<leadsto>mrw w
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
sequentially_consistent P (E, ws)
goal (1 subgoal):
1. P \<turnstile> (E, \<lambda>i. The (most_recent_write_for P E i)) \<surd>
[PROOF STEP]
show "P \<turnstile> (E, ws) \<surd>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. P \<turnstile> (E, ws) \<surd>
[PROOF STEP]
proof(rule wf_execI)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. is_write_seen P E ws
2. thread_start_actions_ok E
[PROOF STEP]
show "is_write_seen P E ws"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. is_write_seen P E ws
[PROOF STEP]
proof(rule is_write_seenI)
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
fix a ad al v
[PROOF STATE]
proof (state)
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
assume a: "a \<in> read_actions E"
and adal: "action_obs E a = NormalAction (ReadMem ad al v)"
[PROOF STATE]
proof (state)
this:
a \<in> read_actions E
action_obs E a = NormalAction (ReadMem ad al v)
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from ns
[PROOF STATE]
proof (chain)
picking this:
non_speculative P (\<lambda>_. {}) (lmap snd E)
[PROOF STEP]
have seq': "non_speculative P (\<lambda>_. {}) (ltake (enat a) (lmap snd E))"
[PROOF STATE]
proof (prove)
using this:
non_speculative P (\<lambda>_. {}) (lmap snd E)
goal (1 subgoal):
1. non_speculative P (\<lambda>_. {}) (ltake (enat a) (lmap snd E))
[PROOF STEP]
by(rule non_speculative_ltake)
[PROOF STATE]
proof (state)
this:
non_speculative P (\<lambda>_. {}) (ltake (enat a) (lmap snd E))
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from seq a seq new_actions_for_fun
[PROOF STATE]
proof (chain)
picking this:
ta_seq_consist P Map.empty (lmap snd E)
a \<in> read_actions E
ta_seq_consist P Map.empty (lmap snd E)
\<lbrakk>?a \<in> new_actions_for P E ?adal; ?a' \<in> new_actions_for P E ?adal\<rbrakk> \<Longrightarrow> ?a = ?a'
[PROOF STEP]
obtain w where mrw: "P,E \<turnstile> a \<leadsto>mrw w"
and "w < a"
[PROOF STATE]
proof (prove)
using this:
ta_seq_consist P Map.empty (lmap snd E)
a \<in> read_actions E
ta_seq_consist P Map.empty (lmap snd E)
\<lbrakk>?a \<in> new_actions_for P E ?adal; ?a' \<in> new_actions_for P E ?adal\<rbrakk> \<Longrightarrow> ?a = ?a'
goal (1 subgoal):
1. (\<And>w. \<lbrakk>P,E \<turnstile> a \<leadsto>mrw w; w < a\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by(auto dest: ta_seq_consist_most_recent_write_for)
[PROOF STATE]
proof (state)
this:
P,E \<turnstile> a \<leadsto>mrw w
w < a
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence w: "ws a = w"
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> a \<leadsto>mrw w
w < a
goal (1 subgoal):
1. ws a = w
[PROOF STEP]
by(simp add: ws_def THE_most_recent_writeI)
[PROOF STATE]
proof (state)
this:
ws a = w
goal (7 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> ws a \<in> write_actions E
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> (ad, al) \<in> action_loc P E (ws a)
3. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
4. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>hb ws a
5. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
6. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
7. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
with mrw adal
[PROOF STATE]
proof (chain)
picking this:
P,E \<turnstile> a \<leadsto>mrw w
action_obs E a = NormalAction (ReadMem ad al v)
ws a = w
[PROOF STEP]
show "ws a \<in> write_actions E"
and "(ad, al) \<in> action_loc P E (ws a)"
and "\<not> P,E \<turnstile> a \<le>hb ws a"
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> a \<leadsto>mrw w
action_obs E a = NormalAction (ReadMem ad al v)
ws a = w
goal (1 subgoal):
1. ws a \<in> write_actions E &&& (ad, al) \<in> action_loc P E (ws a) &&& \<not> P,E \<turnstile> a \<le>hb ws a
[PROOF STEP]
by(fastforce elim!: most_recent_write_for.cases dest: happens_before_into_action_order antisymPD[OF antisym_action_order] read_actions_not_write_actions)+
[PROOF STATE]
proof (state)
this:
ws a \<in> write_actions E
(ad, al) \<in> action_loc P E (ws a)
\<not> P,E \<turnstile> a \<le>hb ws a
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
let ?between = "ltake (enat (a - Suc w)) (ldropn (Suc w) E)"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
let ?prefix = "ltake (enat w) E"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
let ?vs_prefix = "mrw_values P Map.empty (map snd (list_of ?prefix))"
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
{
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
fix v'
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
assume new: "is_new_action (action_obs E w)"
and vs': "?vs_prefix (ad, al) = \<lfloor>(v', True)\<rfloor>"
[PROOF STATE]
proof (state)
this:
is_new_action (action_obs E w)
mrw_values P Map.empty (map snd (list_of (ltake (enat w) E))) (ad, al) = \<lfloor>(v', True)\<rfloor>
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from mrw_values_eq_SomeD[OF vs']
[PROOF STATE]
proof (chain)
picking this:
(None = \<lfloor>(v', True)\<rfloor> \<Longrightarrow> \<exists>wa. wa \<in> set (map snd (list_of (ltake (enat w) E))) \<and> is_write_action wa \<and> (ad, al) \<in> action_loc_aux P wa \<and> (True \<longrightarrow> \<not> is_new_action wa)) \<Longrightarrow> \<exists>obs' wa obs''. map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs'' \<and> is_write_action wa \<and> (ad, al) \<in> action_loc_aux P wa \<and> value_written_aux P wa al = v' \<and> is_new_action wa = (\<not> True) \<and> (\<forall>ob\<in>set obs''. is_write_action ob \<longrightarrow> (ad, al) \<in> action_loc_aux P ob \<longrightarrow> is_new_action ob \<and> True)
[PROOF STEP]
obtain obs' wa obs'' where split: "map snd (list_of ?prefix) = obs' @ wa # obs''"
and wa: "is_write_action wa"
and adal': "(ad, al) \<in> action_loc_aux P wa"
and new_wa: "\<not> is_new_action wa"
[PROOF STATE]
proof (prove)
using this:
(None = \<lfloor>(v', True)\<rfloor> \<Longrightarrow> \<exists>wa. wa \<in> set (map snd (list_of (ltake (enat w) E))) \<and> is_write_action wa \<and> (ad, al) \<in> action_loc_aux P wa \<and> (True \<longrightarrow> \<not> is_new_action wa)) \<Longrightarrow> \<exists>obs' wa obs''. map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs'' \<and> is_write_action wa \<and> (ad, al) \<in> action_loc_aux P wa \<and> value_written_aux P wa al = v' \<and> is_new_action wa = (\<not> True) \<and> (\<forall>ob\<in>set obs''. is_write_action ob \<longrightarrow> (ad, al) \<in> action_loc_aux P ob \<longrightarrow> is_new_action ob \<and> True)
goal (1 subgoal):
1. (\<And>obs' wa obs''. \<lbrakk>map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''; is_write_action wa; (ad, al) \<in> action_loc_aux P wa; \<not> is_new_action wa\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''
is_write_action wa
(ad, al) \<in> action_loc_aux P wa
\<not> is_new_action wa
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from split
[PROOF STATE]
proof (chain)
picking this:
map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''
[PROOF STEP]
have "length (map snd (list_of ?prefix)) = Suc (length obs' + length obs'')"
[PROOF STATE]
proof (prove)
using this:
map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''
goal (1 subgoal):
1. length (map snd (list_of (ltake (enat w) E))) = Suc (length obs' + length obs'')
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
length (map snd (list_of (ltake (enat w) E))) = Suc (length obs' + length obs'')
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence len_prefix: "llength ?prefix = enat \<dots>"
[PROOF STATE]
proof (prove)
using this:
length (map snd (list_of (ltake (enat w) E))) = Suc (length obs' + length obs'')
goal (1 subgoal):
1. llength (ltake (enat w) E) = enat (Suc (length obs' + length obs''))
[PROOF STEP]
by(simp add: length_list_of_conv_the_enat min_enat1_conv_enat)
[PROOF STATE]
proof (state)
this:
llength (ltake (enat w) E) = enat (Suc (length obs' + length obs''))
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
with split
[PROOF STATE]
proof (chain)
picking this:
map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''
llength (ltake (enat w) E) = enat (Suc (length obs' + length obs''))
[PROOF STEP]
have "nth (map snd (list_of ?prefix)) (length obs') = wa"
and "enat (length obs') < llength ?prefix"
[PROOF STATE]
proof (prove)
using this:
map snd (list_of (ltake (enat w) E)) = obs' @ wa # obs''
llength (ltake (enat w) E) = enat (Suc (length obs' + length obs''))
goal (1 subgoal):
1. map snd (list_of (ltake (enat w) E)) ! length obs' = wa &&& enat (length obs') < llength (ltake (enat w) E)
[PROOF STEP]
by simp_all
[PROOF STATE]
proof (state)
this:
map snd (list_of (ltake (enat w) E)) ! length obs' = wa
enat (length obs') < llength (ltake (enat w) E)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence "snd (lnth ?prefix (length obs')) = wa"
[PROOF STATE]
proof (prove)
using this:
map snd (list_of (ltake (enat w) E)) ! length obs' = wa
enat (length obs') < llength (ltake (enat w) E)
goal (1 subgoal):
1. snd (lnth (ltake (enat w) E) (length obs')) = wa
[PROOF STEP]
by(simp add: list_of_lmap[symmetric] del: list_of_lmap)
[PROOF STATE]
proof (state)
this:
snd (lnth (ltake (enat w) E) (length obs')) = wa
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence wa': "action_obs E (length obs') = wa" and "enat (length obs') < llength E"
[PROOF STATE]
proof (prove)
using this:
snd (lnth (ltake (enat w) E) (length obs')) = wa
goal (1 subgoal):
1. action_obs E (length obs') = wa &&& enat (length obs') < llength E
[PROOF STEP]
using \<open>enat (length obs') < llength ?prefix\<close>
[PROOF STATE]
proof (prove)
using this:
snd (lnth (ltake (enat w) E) (length obs')) = wa
enat (length obs') < llength (ltake (enat w) E)
goal (1 subgoal):
1. action_obs E (length obs') = wa &&& enat (length obs') < llength E
[PROOF STEP]
by(auto simp add: action_obs_def lnth_ltake)
[PROOF STATE]
proof (state)
this:
action_obs E (length obs') = wa
enat (length obs') < llength E
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
with wa
[PROOF STATE]
proof (chain)
picking this:
is_write_action wa
action_obs E (length obs') = wa
enat (length obs') < llength E
[PROOF STEP]
have "length obs' \<in> write_actions E"
[PROOF STATE]
proof (prove)
using this:
is_write_action wa
action_obs E (length obs') = wa
enat (length obs') < llength E
goal (1 subgoal):
1. length obs' \<in> write_actions E
[PROOF STEP]
by(auto intro: write_actions.intros simp add: actions_def)
[PROOF STATE]
proof (state)
this:
length obs' \<in> write_actions E
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from most_recent_write_recent[OF mrw _ this, of "(ad, al)"] adal adal' wa'
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>(ad, al) \<in> action_loc P E a; (ad, al) \<in> action_loc P E (length obs')\<rbrakk> \<Longrightarrow> E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
action_obs E a = NormalAction (ReadMem ad al v)
(ad, al) \<in> action_loc_aux P wa
action_obs E (length obs') = wa
[PROOF STEP]
have "E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(ad, al) \<in> action_loc P E a; (ad, al) \<in> action_loc P E (length obs')\<rbrakk> \<Longrightarrow> E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
action_obs E a = NormalAction (ReadMem ad al v)
(ad, al) \<in> action_loc_aux P wa
action_obs E (length obs') = wa
goal (1 subgoal):
1. E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence False
[PROOF STATE]
proof (prove)
using this:
E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
goal (1 subgoal):
1. False
[PROOF STEP]
using new_wa new wa' adal len_prefix \<open>w < a\<close>
[PROOF STATE]
proof (prove)
using this:
E \<turnstile> length obs' \<le>a w \<or> E \<turnstile> a \<le>a length obs'
\<not> is_new_action wa
is_new_action (action_obs E w)
action_obs E (length obs') = wa
action_obs E a = NormalAction (ReadMem ad al v)
llength (ltake (enat w) E) = enat (Suc (length obs' + length obs''))
w < a
goal (1 subgoal):
1. False
[PROOF STEP]
by(auto elim!: action_orderE simp add: min_enat1_conv_enat split: enat.split_asm)
[PROOF STATE]
proof (state)
this:
False
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>is_new_action (action_obs E w); mrw_values P Map.empty (map snd (list_of (ltake (enat w) E))) (ad, al) = \<lfloor>(?v'2, True)\<rfloor>\<rbrakk> \<Longrightarrow> False
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence mrw_value_w: "mrw_value P ?vs_prefix (snd (lnth E w)) (ad, al) =
\<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>is_new_action (action_obs E w); mrw_values P Map.empty (map snd (list_of (ltake (enat w) E))) (ad, al) = \<lfloor>(?v'2, True)\<rfloor>\<rbrakk> \<Longrightarrow> False
goal (1 subgoal):
1. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
using \<open>ws a \<in> write_actions E\<close> \<open>(ad, al) \<in> action_loc P E (ws a)\<close> w
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>is_new_action (action_obs E w); mrw_values P Map.empty (map snd (list_of (ltake (enat w) E))) (ad, al) = \<lfloor>(?v'2, True)\<rfloor>\<rbrakk> \<Longrightarrow> False
ws a \<in> write_actions E
(ad, al) \<in> action_loc P E (ws a)
ws a = w
goal (1 subgoal):
1. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
by(cases "snd (lnth E w)" rule: mrw_value_cases)(fastforce elim: write_actions.cases simp add: value_written_def action_obs_def)+
[PROOF STATE]
proof (state)
this:
mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
have "mrw_values P (mrw_value P ?vs_prefix (snd (lnth E w))) (list_of (lmap snd ?between)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
proof(subst mrw_values_no_write_unchanged)
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
fix wa
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
assume "wa \<in> set (list_of (lmap snd ?between))"
and write_wa: "is_write_action wa"
and adal_wa: "(ad, al) \<in> action_loc_aux P wa"
[PROOF STATE]
proof (state)
this:
wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))
is_write_action wa
(ad, al) \<in> action_loc_aux P wa
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
hence wa: "wa \<in> lset (lmap snd ?between)"
[PROOF STATE]
proof (prove)
using this:
wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))
is_write_action wa
(ad, al) \<in> action_loc_aux P wa
goal (1 subgoal):
1. wa \<in> lset (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
wa \<in> lset (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
from wa
[PROOF STATE]
proof (chain)
picking this:
wa \<in> lset (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
[PROOF STEP]
obtain i_wa where "wa = lnth (lmap snd ?between) i_wa"
and i_wa: "enat i_wa < llength (lmap snd ?between)"
[PROOF STATE]
proof (prove)
using this:
wa \<in> lset (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
goal (1 subgoal):
1. (\<And>i_wa. \<lbrakk>wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa; enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
unfolding lset_conv_lnth
[PROOF STATE]
proof (prove)
using this:
wa \<in> {lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) n |n. enat n < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))}
goal (1 subgoal):
1. (\<And>i_wa. \<lbrakk>wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa; enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))\<rbrakk> \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
by blast
[PROOF STATE]
proof (state)
this:
wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
hence i_wa_len: "enat (Suc (w + i_wa)) < llength E"
[PROOF STATE]
proof (prove)
using this:
wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
goal (1 subgoal):
1. enat (Suc (w + i_wa)) < llength E
[PROOF STEP]
by(cases "llength E") auto
[PROOF STATE]
proof (state)
this:
enat (Suc (w + i_wa)) < llength E
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
enat (Suc (w + i_wa)) < llength E
[PROOF STEP]
have wa': "wa = action_obs E (Suc (w + i_wa))"
[PROOF STATE]
proof (prove)
using this:
wa = lnth (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))) i_wa
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
enat (Suc (w + i_wa)) < llength E
goal (1 subgoal):
1. wa = action_obs E (Suc (w + i_wa))
[PROOF STEP]
by(simp_all add: lnth_ltake action_obs_def ac_simps)
[PROOF STATE]
proof (state)
this:
wa = action_obs E (Suc (w + i_wa))
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
with write_wa i_wa_len
[PROOF STATE]
proof (chain)
picking this:
is_write_action wa
enat (Suc (w + i_wa)) < llength E
wa = action_obs E (Suc (w + i_wa))
[PROOF STEP]
have "Suc (w + i_wa) \<in> write_actions E"
[PROOF STATE]
proof (prove)
using this:
is_write_action wa
enat (Suc (w + i_wa)) < llength E
wa = action_obs E (Suc (w + i_wa))
goal (1 subgoal):
1. Suc (w + i_wa) \<in> write_actions E
[PROOF STEP]
by(auto intro: write_actions.intros simp add: actions_def)
[PROOF STATE]
proof (state)
this:
Suc (w + i_wa) \<in> write_actions E
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
from most_recent_write_recent[OF mrw _ this, of "(ad, al)"] adal adal_wa wa'
[PROOF STATE]
proof (chain)
picking this:
\<lbrakk>(ad, al) \<in> action_loc P E a; (ad, al) \<in> action_loc P E (Suc (w + i_wa))\<rbrakk> \<Longrightarrow> E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
action_obs E a = NormalAction (ReadMem ad al v)
(ad, al) \<in> action_loc_aux P wa
wa = action_obs E (Suc (w + i_wa))
[PROOF STEP]
have "E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)"
[PROOF STATE]
proof (prove)
using this:
\<lbrakk>(ad, al) \<in> action_loc P E a; (ad, al) \<in> action_loc P E (Suc (w + i_wa))\<rbrakk> \<Longrightarrow> E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
action_obs E a = NormalAction (ReadMem ad al v)
(ad, al) \<in> action_loc_aux P wa
wa = action_obs E (Suc (w + i_wa))
goal (1 subgoal):
1. E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
[PROOF STEP]
by(simp)
[PROOF STATE]
proof (state)
this:
E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
hence "is_new_action wa \<and> \<not> is_new_action (action_obs E w)"
[PROOF STATE]
proof (prove)
using this:
E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
goal (1 subgoal):
1. is_new_action wa \<and> \<not> is_new_action (action_obs E w)
[PROOF STEP]
using adal i_wa wa'
[PROOF STATE]
proof (prove)
using this:
E \<turnstile> Suc (w + i_wa) \<le>a w \<or> E \<turnstile> a \<le>a Suc (w + i_wa)
action_obs E a = NormalAction (ReadMem ad al v)
enat i_wa < llength (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))
wa = action_obs E (Suc (w + i_wa))
goal (1 subgoal):
1. is_new_action wa \<and> \<not> is_new_action (action_obs E w)
[PROOF STEP]
by(auto elim: action_orderE)
[PROOF STATE]
proof (state)
this:
is_new_action wa \<and> \<not> is_new_action (action_obs E w)
goal (2 subgoals):
1. \<And>wa. \<lbrakk>wa \<in> set (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))); is_write_action wa; (ad, al) \<in> action_loc_aux P wa\<rbrakk> \<Longrightarrow> case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
2. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
thus "case (mrw_value P ?vs_prefix (snd (lnth E w)) (ad, al)) of None \<Rightarrow> False | Some (v, b) \<Rightarrow> b \<and> is_new_action wa"
[PROOF STATE]
proof (prove)
using this:
is_new_action wa \<and> \<not> is_new_action (action_obs E w)
goal (1 subgoal):
1. case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
[PROOF STEP]
unfolding mrw_value_w
[PROOF STATE]
proof (prove)
using this:
is_new_action wa \<and> \<not> is_new_action (action_obs E w)
goal (1 subgoal):
1. case \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor> of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
case mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) of None \<Rightarrow> False | \<lfloor>(v, b)\<rfloor> \<Rightarrow> b \<and> is_new_action wa
goal (1 subgoal):
1. mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w)) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
[PROOF STEP]
qed(simp add: mrw_value_w)
[PROOF STATE]
proof (state)
this:
mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
moreover
[PROOF STATE]
proof (state)
this:
mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from a
[PROOF STATE]
proof (chain)
picking this:
a \<in> read_actions E
[PROOF STEP]
have "a \<in> actions E"
[PROOF STATE]
proof (prove)
using this:
a \<in> read_actions E
goal (1 subgoal):
1. a \<in> actions E
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
a \<in> actions E
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence "enat a < llength E"
[PROOF STATE]
proof (prove)
using this:
a \<in> actions E
goal (1 subgoal):
1. enat a < llength E
[PROOF STEP]
by(rule actionsE)
[PROOF STATE]
proof (state)
this:
enat a < llength E
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
with \<open>w < a\<close>
[PROOF STATE]
proof (chain)
picking this:
w < a
enat a < llength E
[PROOF STEP]
have "enat (a - Suc w) < llength E - enat (Suc w)"
[PROOF STATE]
proof (prove)
using this:
w < a
enat a < llength E
goal (1 subgoal):
1. enat (a - Suc w) < llength E - enat (Suc w)
[PROOF STEP]
by(cases "llength E") simp_all
[PROOF STATE]
proof (state)
this:
enat (a - Suc w) < llength E - enat (Suc w)
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence "E = lappend (lappend ?prefix (LCons (lnth E w) ?between)) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))"
[PROOF STATE]
proof (prove)
using this:
enat (a - Suc w) < llength E - enat (Suc w)
goal (1 subgoal):
1. E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))
[PROOF STEP]
using \<open>w < a\<close> \<open>enat a < llength E\<close>
[PROOF STATE]
proof (prove)
using this:
enat (a - Suc w) < llength E - enat (Suc w)
w < a
enat a < llength E
goal (1 subgoal):
1. E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))
[PROOF STEP]
unfolding lappend_assoc lappend_code
[PROOF STATE]
proof (prove)
using this:
enat (a - Suc w) < llength E - enat (Suc w)
w < a
enat a < llength E
goal (1 subgoal):
1. E = lappend (ltake (enat w) E) (LCons (lnth E w) (lappend (ltake (enat (a - Suc w)) (ldropn (Suc w) E)) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))))
[PROOF STEP]
apply(subst ldropn_Suc_conv_ldropn, simp)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>enat (a - Suc w) < llength E - enat (Suc w); w < a; enat a < llength E\<rbrakk> \<Longrightarrow> E = lappend (ltake (enat w) E) (LCons (lnth E w) (lappend (ltake (enat (a - Suc w)) (ldropn (Suc w) E)) (ldropn (a - Suc w) (ldropn (Suc w) E))))
[PROOF STEP]
apply(subst lappend_ltake_enat_ldropn)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>enat (a - Suc w) < llength E - enat (Suc w); w < a; enat a < llength E\<rbrakk> \<Longrightarrow> E = lappend (ltake (enat w) E) (LCons (lnth E w) (ldropn (Suc w) E))
[PROOF STEP]
apply(subst ldropn_Suc_conv_ldropn, simp add: less_trans[where y="enat a"])
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>enat (a - Suc w) < llength E - enat (Suc w); w < a; enat a < llength E\<rbrakk> \<Longrightarrow> E = lappend (ltake (enat w) E) (ldropn w E)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
hence E': "E = lappend (lappend ?prefix (LCons (lnth E w) ?between)) (LCons (lnth E a) (ldropn (Suc a) E))"
[PROOF STATE]
proof (prove)
using this:
E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))
goal (1 subgoal):
1. E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth E a) (ldropn (Suc a) E))
[PROOF STEP]
using \<open>w < a\<close> \<open>enat a < llength E\<close>
[PROOF STATE]
proof (prove)
using this:
E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth (ldropn (Suc w) E) (a - Suc w)) (ldropn (Suc (a - Suc w)) (ldropn (Suc w) E)))
w < a
enat a < llength E
goal (1 subgoal):
1. E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth E a) (ldropn (Suc a) E))
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
E = lappend (lappend (ltake (enat w) E) (LCons (lnth E w) (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (LCons (lnth E a) (ldropn (Suc a) E))
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
from seq
[PROOF STATE]
proof (chain)
picking this:
ta_seq_consist P Map.empty (lmap snd E)
[PROOF STEP]
have "ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd ?prefix) (LCons (snd (lnth E w)) (lmap snd ?between))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))"
[PROOF STATE]
proof (prove)
using this:
ta_seq_consist P Map.empty (lmap snd E)
goal (1 subgoal):
1. ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd (ltake (enat w) E)) (LCons (snd (lnth E w)) (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))
[PROOF STEP]
by(subst (asm) E')(simp add: lmap_lappend_distrib ta_seq_consist_lappend)
[PROOF STATE]
proof (state)
this:
ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd (ltake (enat w) E)) (LCons (snd (lnth E w)) (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))
goal (4 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v)\<rbrakk> \<Longrightarrow> value_written P E (ws a) (ad, al) = v
2. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
ultimately
[PROOF STATE]
proof (chain)
picking this:
mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd (ltake (enat w) E)) (LCons (snd (lnth E w)) (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))
[PROOF STEP]
show "value_written P E (ws a) (ad, al) = v"
[PROOF STATE]
proof (prove)
using this:
mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd (ltake (enat w) E)) (LCons (snd (lnth E w)) (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))
goal (1 subgoal):
1. value_written P E (ws a) (ad, al) = v
[PROOF STEP]
using adal w
[PROOF STATE]
proof (prove)
using this:
mrw_values P (mrw_value P (mrw_values P Map.empty (map snd (list_of (ltake (enat w) E)))) (snd (lnth E w))) (list_of (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E)))) (ad, al) = \<lfloor>(value_written P E w (ad, al), \<not> is_new_action (action_obs E w))\<rfloor>
ta_seq_consist P (mrw_values P Map.empty (list_of (lappend (lmap snd (ltake (enat w) E)) (LCons (snd (lnth E w)) (lmap snd (ltake (enat (a - Suc w)) (ldropn (Suc w) E))))))) (lmap snd (LCons (lnth E a) (ldropn (Suc a) E)))
action_obs E a = NormalAction (ReadMem ad al v)
ws a = w
goal (1 subgoal):
1. value_written P E (ws a) (ad, al) = v
[PROOF STEP]
by(clarsimp simp add: action_obs_def list_of_lappend list_of_LCons)
[PROOF STATE]
proof (state)
this:
value_written P E (ws a) (ad, al) = v
goal (3 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
(* assume "is_volatile P al" *)
[PROOF STATE]
proof (state)
this:
value_written P E (ws a) (ad, al) = v
goal (3 subgoals):
1. \<And>a ad al v. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); is_volatile P al\<rbrakk> \<Longrightarrow> \<not> P,E \<turnstile> a \<le>so ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
show "\<not> P,E \<turnstile> a \<le>so ws a"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<not> P,E \<turnstile> a \<le>so ws a
[PROOF STEP]
using \<open>w < a\<close> w adal
[PROOF STATE]
proof (prove)
using this:
w < a
ws a = w
action_obs E a = NormalAction (ReadMem ad al v)
goal (1 subgoal):
1. \<not> P,E \<turnstile> a \<le>so ws a
[PROOF STEP]
by(auto elim!: action_orderE sync_orderE)
[PROOF STATE]
proof (state)
this:
\<not> P,E \<turnstile> a \<le>so ws a
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
fix a'
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
assume a': "a' \<in> write_actions E" "(ad, al) \<in> action_loc P E a'"
[PROOF STATE]
proof (state)
this:
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
{
[PROOF STATE]
proof (state)
this:
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
presume "E \<turnstile> ws a \<le>a a'" "E \<turnstile> a' \<le>a a"
[PROOF STATE]
proof (state)
this:
E \<turnstile> ws a \<le>a a'
E \<turnstile> a' \<le>a a
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
with mrw adal a'
[PROOF STATE]
proof (chain)
picking this:
P,E \<turnstile> a \<leadsto>mrw w
action_obs E a = NormalAction (ReadMem ad al v)
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
E \<turnstile> ws a \<le>a a'
E \<turnstile> a' \<le>a a
[PROOF STEP]
have "a' = ws a"
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> a \<leadsto>mrw w
action_obs E a = NormalAction (ReadMem ad al v)
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
E \<turnstile> ws a \<le>a a'
E \<turnstile> a' \<le>a a
goal (1 subgoal):
1. a' = ws a
[PROOF STEP]
unfolding w
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> a \<leadsto>mrw w
action_obs E a = NormalAction (ReadMem ad al v)
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
E \<turnstile> w \<le>a a'
E \<turnstile> a' \<le>a a
goal (1 subgoal):
1. a' = w
[PROOF STEP]
by cases(fastforce dest: antisymPD[OF antisym_action_order] read_actions_not_write_actions elim!: meta_allE[where x=a'])
[PROOF STATE]
proof (state)
this:
a' = ws a
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> a' = ws a
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> a' = ws a
[PROOF STEP]
thus "a' = ws a" "a' = ws a"
[PROOF STATE]
proof (prove)
using this:
a' = ws a
goal (1 subgoal):
1. a' = ws a &&& a' = ws a
[PROOF STEP]
by -
[PROOF STATE]
proof (state)
this:
a' = ws a
a' = ws a
goal (4 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (4 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
assume "P,E \<turnstile> ws a \<le>hb a'" "P,E \<turnstile> a' \<le>hb a"
[PROOF STATE]
proof (state)
this:
P,E \<turnstile> ws a \<le>hb a'
P,E \<turnstile> a' \<le>hb a
goal (4 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; P,E \<turnstile> ws a \<le>hb a'; P,E \<turnstile> a' \<le>hb a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
3. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
4. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
thus "E \<turnstile> ws a \<le>a a'" "E \<turnstile> a' \<le>a a"
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> ws a \<le>hb a'
P,E \<turnstile> a' \<le>hb a
goal (1 subgoal):
1. E \<turnstile> ws a \<le>a a' &&& E \<turnstile> a' \<le>a a
[PROOF STEP]
using a'
[PROOF STATE]
proof (prove)
using this:
P,E \<turnstile> ws a \<le>hb a'
P,E \<turnstile> a' \<le>hb a
a' \<in> write_actions E
(ad, al) \<in> action_loc P E a'
goal (1 subgoal):
1. E \<turnstile> ws a \<le>a a' &&& E \<turnstile> a' \<le>a a
[PROOF STEP]
by(blast intro: happens_before_into_action_order)+
[PROOF STATE]
proof (state)
this:
E \<turnstile> ws a \<le>a a'
E \<turnstile> a' \<le>a a
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
assume "is_volatile P al" "P,E \<turnstile> ws a \<le>so a'" "P,E \<turnstile> a' \<le>so a"
[PROOF STATE]
proof (state)
this:
is_volatile P al
P,E \<turnstile> ws a \<le>so a'
P,E \<turnstile> a' \<le>so a
goal (2 subgoals):
1. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
2. \<And>a ad al v a'. \<lbrakk>a \<in> read_actions E; action_obs E a = NormalAction (ReadMem ad al v); a' \<in> write_actions E; (ad, al) \<in> action_loc P E a'; is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
[PROOF STEP]
thus "E \<turnstile> ws a \<le>a a'" "E \<turnstile> a' \<le>a a"
[PROOF STATE]
proof (prove)
using this:
is_volatile P al
P,E \<turnstile> ws a \<le>so a'
P,E \<turnstile> a' \<le>so a
goal (1 subgoal):
1. E \<turnstile> ws a \<le>a a' &&& E \<turnstile> a' \<le>a a
[PROOF STEP]
by(auto elim: sync_orderE)
[PROOF STATE]
proof (state)
this:
E \<turnstile> ws a \<le>a a'
E \<turnstile> a' \<le>a a
goal:
No subgoals!
[PROOF STEP]
}
[PROOF STATE]
proof (state)
this:
\<lbrakk>is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> ws a \<le>a a'
\<lbrakk>is_volatile P al; P,E \<turnstile> ws a \<le>so a'; P,E \<turnstile> a' \<le>so a\<rbrakk> \<Longrightarrow> E \<turnstile> a' \<le>a a
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
is_write_seen P E ws
goal (1 subgoal):
1. thread_start_actions_ok E
[PROOF STEP]
qed(rule tsa_ok)
[PROOF STATE]
proof (state)
this:
P \<turnstile> (E, ws) \<surd>
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 37453, "file": "JinjaThreads_MM_SC_Completion", "length": 150} |
import yaml
import argparse
from collections import deque
from nes_py.wrappers import JoypadSpace
import gym_super_mario_bros
from gym_super_mario_bros.actions import SIMPLE_MOVEMENT
from models import DeepQ
from collections.abc import Iterable
from utils import AverageMeter
import torch
import random
import copy
import numpy as np
import torch.nn.functional as F
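# game_DQ.py: a Deep Q-learning training/evaluation loop for gym_super_mario_bros.
# MarioGame wraps the environment, the replay buffer and the update schedule; the Q network
# itself (DeepQ from models) and the loss (criterion) are supplied by the caller.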
class MarioGame:
def __init__(self, model=DeepQ, optimizer=None, criterion=None,
render=False,world=1, stage=1, version=1, args=None):
assert 1 <= world <= 8
assert 1 <= stage <= 4
assert 0 <= version <= 3
env_s = f'SuperMarioBros-{world}-{stage}-v{version}'
env = gym_super_mario_bros.make(env_s)
self.env = JoypadSpace(env, SIMPLE_MOVEMENT)
#self.env = JoypadSpace(env, self.action_space)
self.action_space = ['NOOP', 'right', 'right A', 'right B', 'right A B', 'A', 'left']
        self.render = args.render if args is not None else render
self.model = model
self.optimizer = optimizer
self.criterion = criterion
self.args = args
self.device = self.args.device
self.total_steps = 0
self.batch_size = self.args.batch_size
self.replay_buf = deque(maxlen=self.args.replay_buffer_size)
# Action space is Discrete(7)
# Action means ['NOOP', 'right', 'right A', 'right B', 'right A B', 'A', 'left']
# State shape is (240, 256, 3)
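    # One call to game() runs a full episode:
    #   preprocess the observation -> get an action from self.model (epsilon is presumably used
    #   for epsilon-greedy exploration inside DeepQ) -> frame-skipped env step -> (in training)
    #   store the transition, periodically sample a batch and update the online network, and
    #   periodically copy QSA_online into QSA_target.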
def game(self, epsilon=0):
done = False
info = {'x_pos': 40, 'y_pos': 79, 'time': 400}
reward = 0
reward_eps = 0
state = self.env.reset()
step = 0
loss_eps = AverageMeter()
stopEps = StopNonProEps(step_thres=self.args.step_thres)
action = None
while not done:
state_prev, _, _ = self.preprocess(state=state, action_prev=action, info=info)
action = self.model(state_prev, epsilon)
state, reward, done, info = self.step(action)
reward_eps += reward
if self.model.training:
                # Sample (s, a, r, s') transitions from the replay buffer; updates only start
                # once the buffer holds at least 16 batches and then run every batch_size steps.
                # Uniform sampling over a sliding-window deque means early transitions can be
                # drawn more often than recent ones, which can shift the training distribution.
reward, done = stopEps.action(reward, done, info)
                state_next, reward, ndone = self.preprocess(state=state, reward=reward, action_prev=action, done=done, info=info)
self.buffer_update(state_prev, action, reward, ndone, state_next)
if step % self.batch_size == 0 and len(self.replay_buf) >= self.batch_size*16:
state_t, action_t, reward_t, ndone_t, state_next_t = self.sample()
                    # Compute the loss on the sampled batch and take one optimizer step on the
                    # online Q network (gradients are clipped inside model_update).
loss_batch = self.model_update(state_t, action_t, reward_t, ndone_t, state_next_t)
loss_eps.update(loss_batch)
if (step // self.args.batch_size) % self.args.print_every == 0:
print(f"\tStep [{step}]/XXX "
f"Loss : {loss_batch:.4f}"
f"\tTotal rewards : {reward_eps}\tepsilon : {epsilon:.2f}")
with torch.no_grad():
qsa = self.model.QSA_target((state_t[0][0:5, :], state_t[1][0:5, :]))
print("\tQSA:\n\t"+str(qsa[0])+"\n\t"+str(qsa[1])+"\n\t"+str(qsa[2])+"\n\t"+str(qsa[3]))
print("\tHistorgam of last batch actions: " +
str(torch.histc(action_t.float(), bins=self.args.action_len, min=0, max=self.args.action_len)))
if self.total_steps % self.args.update_target_every == 0 or done:
                    # Periodically sync the target network by deep-copying the online Q network
                    # (only QSA_online is copied, not the replay buffer).
self.model.QSA_target = copy.deepcopy(self.model.QSA_online)
self.model.QSA_target.eval()
#self.copy_counter = 0
self.total_steps += 1
#if self.render:
# self.env.render()
step += 1
return reward_eps, loss_eps.avg, step, self.total_steps
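    # model_update: one gradient step on the online Q network. The criterion passed to the
    # constructor is expected to compute the DQN loss from the sampled batch (e.g. the TD error
    # against QSA_target); gradients are clipped to args.clip_grad before the optimizer step.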
def model_update(self, state_t, action_t, reward_t, done_t, state_next_t):
loss = self.criterion(state_t, action_t, reward_t, done_t, state_next_t)
self.optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(self.model.QSA_online.parameters(), self.args.clip_grad)
self.optimizer.step()
return loss.item()
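    # step: repeat the chosen action for args.skip_frame_cnt frames, summing the per-frame
    # rewards; returns early (with only the last frame's reward) if the episode terminates.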
def step(self, action):
reward_eps = 0
for i in range(self.args.skip_frame_cnt):
state, reward, done, info = self.env.step(action)
reward_eps += float(reward)
if self.render:
self.env.render()
if done:
return state, reward, done, info
return state, reward_eps, done, info
def reset(self):
self.env.reset()
def stop(self):
self.env.close()
def preprocess(self, state, reward=0, action_prev=None, info = None, done=False):
        ################################################
        # Convert the raw environment outputs (frame, previous action, info dict, reward)
        # into normalized tensors that can be fed to the network.
        ################################################
        # Grayscale + downsample: state tensor shape is (1, 240/downsample, 256/downsample),
        # e.g. 1x120x128 when downsample=2, scaled to roughly [-1, 1].
state_processed = torch.FloatTensor(np.mean(state[::self.args.downsample, ::self.args.downsample, :], axis=2)/128.0-1).unsqueeze(dim=0)
action_tensor = torch.zeros((len(self.action_space),))
if action_prev:
action_tensor[action_prev] = 1.0
info_tensor = torch.zeros((3,))
if info:
info_tensor[0] = info['x_pos']/120.0-1
info_tensor[1] = info['y_pos']/128.0-1
info_tensor[2] = info['time']/200-1
action_info = torch.cat((action_tensor, info_tensor)).unsqueeze(dim = 0)
        # Normalize the reward: the raw per-frame reward varies from -15 to 15.
        # The game objective is to move as far right as possible; an extra penalty of
        # 15*die_mult is added on episode end, then everything is rescaled to roughly [-1, 1].
if done:
reward += -15*self.args.die_mult
reward_processed = torch.unsqueeze(torch.tensor(reward)/(15*(1+self.args.die_mult)*self.args.skip_frame_cnt), dim=0).to(self.device)
ndone_processed = torch.tensor(not done).unsqueeze(dim=0).to(self.device)
return [state_processed, action_info], reward_processed, ndone_processed
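    # Replay buffer helpers. Each stored transition is
    #   (state, action, reward, not-done flag, next_state)
    # where a "state" is the [frame_tensor, action+info_tensor] pair produced by preprocess.
    # Note that the value stored under the name done_t is actually `not done` (see preprocess).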
def buffer_update(self, state_t, action_t, reward_t, done_t, state_tp1):
item = (state_t, action_t, reward_t, done_t, state_tp1)
self.replay_buf.append(item)
def sample(self):
if len(self.replay_buf) < self.batch_size:
            return None, None, None, None, None
rand_indices = random.sample(range(len(self.replay_buf)), k=self.batch_size)
state = torch.cat([self.replay_buf[i][0][0] for i in rand_indices], dim=0), torch.cat([self.replay_buf[i][0][1] for i in rand_indices], dim=0)
action = torch.unsqueeze(torch.LongTensor([self.replay_buf[i][1] for i in rand_indices]), dim = 1).to(self.device)
reward = torch.cat([self.replay_buf[i][2] for i in rand_indices], dim=0)
done = torch.cat([self.replay_buf[i][3] for i in rand_indices], dim=0)
state_next = torch.cat([self.replay_buf[i][4][0] for i in rand_indices], dim=0), torch.cat([self.replay_buf[i][4][1] for i in rand_indices], dim=0)
return state, action, reward, done, state_next
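# Ends an episode early when Mario makes no progress: if x_pos has not changed for
# step_thres consecutive (frame-skipped) steps, the step is treated as a death (-15 reward).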
class StopNonProEps:
def __init__(self, step_thres):
self.xpos = -1
self.count = 0
self.step_thres = step_thres
def action(self, reward, done, info):
if self.xpos == info['x_pos']:
self.count += 1
if self.count == self.step_thres:
reward = -15
done = True
else:
self.count = 0
self.xpos = info['x_pos']
return reward, done
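# main() and main2() are manual smoke tests that drive the environment with a fixed action;
# main() additionally loads the YAML config and flattens every section onto the argparse args.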
def main():
parser = argparse.ArgumentParser(description='CS7643 deep_pipes')
parser.add_argument('--config', default='./configs/config_ActorCritic.yaml')
parser.add_argument('--device', default=torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu'))
global args
args = parser.parse_args()
with open(args.config) as f:
config = yaml.load(f, yaml.SafeLoader)
for key in config:
for k, v in config[key].items():
setattr(args, k, v)
MG = MarioGame(args=args)
state = MG.env.reset()
done = False
action = 1
count = 1
while not done:
i = 0
while i < count and not done:
            state, reward, done, info = MG.step(action)
MG.env.render()
i += 1
print(reward)
def main2():
    env_s = 'SuperMarioBros-1-1-v1'
env = gym_super_mario_bros.make(env_s)
env = JoypadSpace(env,[["NOOP"], ["right"], ["left"], ["A"]])
    # With this custom JoypadSpace mapping the action indices are: 0 NOOP, 1 right, 2 left, 3 A
state = env.reset()
done = False
    count = 1
    action = 1  # index into the custom mapping above: 1 == 'right'
while not done:
i = 0
while i < count and not done:
state, reward, done, info = env.step(action)
env.render()
i += 1
print(reward)
if __name__ == '__main__':
main2()
| {"hexsha": "9694053559b45a542e2aa9b3328b475024277b44", "size": 9477, "ext": "py", "lang": "Python", "max_stars_repo_path": "game_DQ.py", "max_stars_repo_name": "realAsma/SuperMario_DeepQ", "max_stars_repo_head_hexsha": "ff39590dc7d553602cd55f2c4bd550ffa2f7fdf1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "game_DQ.py", "max_issues_repo_name": "realAsma/SuperMario_DeepQ", "max_issues_repo_head_hexsha": "ff39590dc7d553602cd55f2c4bd550ffa2f7fdf1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game_DQ.py", "max_forks_repo_name": "realAsma/SuperMario_DeepQ", "max_forks_repo_head_hexsha": "ff39590dc7d553602cd55f2c4bd550ffa2f7fdf1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4875, "max_line_length": 155, "alphanum_fraction": 0.591642925, "include": true, "reason": "import numpy", "num_tokens": 2326} |
# --------------------------------------------------------
# Tensorflow VCL
# Licensed under The MIT License [see LICENSE for details]
# Written by Zhi Hou
# --------------------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.contrib.slim import arg_scope
from tensorflow.contrib.slim.python.slim.nets import resnet_utils
from tensorflow.contrib.slim.python.slim.nets import resnet_v1
from tensorflow.contrib.layers.python.layers import layers
from tensorflow.contrib.layers.python.layers import regularizers
from tensorflow.python.ops import nn_ops
from tensorflow.contrib.layers.python.layers import initializers
from tensorflow.python.framework import ops
from networks.Fabricator import Fabricator
from ult.config import cfg
import numpy as np
import math
import os
# print(os.environ['DATASET'])
if 'DATASET' not in os.environ or os.environ['DATASET'] == 'HICO':
from networks.ResNet50_HICO import ResNet50, resnet_arg_scope
parent_model = ResNet50
elif os.environ['DATASET'] == 'HICO_res101':
from networks.ResNet101_HICO import ResNet101, resnet_arg_scope
parent_model = ResNet101
elif os.environ['DATASET'] == 'VCOCO1':
from networks.ResNet50_VCOCO_HOI import ResNet50, resnet_arg_scope
parent_model = ResNet50
else:
from networks.ResNet50_VCOCO import ResNet50, resnet_arg_scope
parent_model = ResNet50
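# The base network class (and with it the backbone and dataset head) is selected above from the
# DATASET environment variable: ResNet50 for the HICO/V-COCO variants, ResNet101 for 'HICO_res101'.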
class HOI(parent_model):
def __init__(self, model_name='VCL_union_multi_ml5_def1_l2_rew2_aug5_3_x5new_res101'):
super(HOI, self).__init__(model_name)
self.pos1_idx = None
import pickle
self.update_ops = []
self.feature_gen = Fabricator(self)
self.gt_class_HO_for_G_verbs = None
self.gt_class_HO_for_D_verbs = None
self.losses['fake_D_total_loss'] = 0
self.losses['fake_G_total_loss'] = 0
self.losses['fake_total_loss'] = 0
def set_gt_class_HO_for_G_verbs(self, gt_class_HO_for_G_verbs):
self.gt_class_HO_for_G_verbs = gt_class_HO_for_G_verbs
def set_gt_class_HO_for_D_verbs(self, gt_class_HO_for_D_verbs):
self.gt_class_HO_for_D_verbs = gt_class_HO_for_D_verbs
def set_add_ph(self, pos1_idx=None):
self.pos1_idx = pos1_idx
def res5_ho(self, pool5_HO, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
if self.model_name.__contains__('unique_weights'):
print("unique_weights")
st = -3
reuse = tf.AUTO_REUSE
if name != 'res5':
reuse = True
else:
st = -2
reuse = tf.AUTO_REUSE
fc7_HO, _ = resnet_v1.resnet_v1(pool5_HO,
self.blocks[st:st+1],
global_pool=False,
include_root_block=False,
reuse=reuse,
scope=self.scope)
return fc7_HO
def head_to_tail_ho(self, fc7_O, fc7_verbs, fc7_O_raw, fc7_verbs_raw, is_training, name):
if name == 'fc_HO':
nameprefix = '' # TODO should improve
else:
nameprefix = name
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
print('others concat')
concat_hoi = tf.concat([fc7_verbs, fc7_O], 1) # TODO fix
print(concat_hoi)
concat_hoi = slim.fully_connected(concat_hoi, self.num_fc, reuse=tf.AUTO_REUSE, scope=nameprefix+'Concat_verbs')
concat_hoi = slim.dropout(concat_hoi, keep_prob=0.5, is_training=is_training,
scope=nameprefix+'dropout6_verbs')
fc9_hoi = slim.fully_connected(concat_hoi, self.num_fc, reuse=tf.AUTO_REUSE, scope=nameprefix+'fc7_verbs')
fc9_hoi = slim.dropout(fc9_hoi, keep_prob=0.5, is_training=is_training, scope=nameprefix+'dropout7_verbs')
return fc9_hoi
def head_to_tail_sp(self, fc7_H, fc7_O, sp, is_training, name):
with slim.arg_scope(resnet_arg_scope(is_training=is_training)):
Concat_SHsp = tf.concat([fc7_H, sp], 1)
Concat_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, reuse=tf.AUTO_REUSE, scope='Concat_SHsp')
Concat_SHsp = slim.dropout(Concat_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout6_SHsp')
fc7_SHsp = slim.fully_connected(Concat_SHsp, self.num_fc, reuse=tf.AUTO_REUSE, scope='fc7_SHsp')
fc7_SHsp = slim.dropout(fc7_SHsp, keep_prob=0.5, is_training=is_training, scope='dropout7_SHsp')
return fc7_SHsp
def region_classification_sp(self, fc7_SHsp, is_training, initializer, name):
with tf.variable_scope(name) as scope:
cls_score_sp = slim.fully_connected(fc7_SHsp, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
reuse=tf.AUTO_REUSE,
activation_fn=None, scope='cls_score_sp')
cls_prob_sp = tf.nn.sigmoid(cls_score_sp, name='cls_prob_sp')
tf.reshape(cls_prob_sp, [-1, self.num_classes])
self.predictions["cls_score_sp"] = cls_score_sp
self.predictions["cls_prob_sp"] = cls_prob_sp
return cls_prob_sp
def region_classification_ho(self, fc7_verbs, is_training, initializer, name, nameprefix = ''):
# if not self.model_name.startswith('VCL_') and not self.model_name.__contains__('_orig_'):
# return None
with tf.variable_scope(name) as scope:
cls_score_hoi = slim.fully_connected(fc7_verbs, self.num_classes,
weights_initializer=initializer,
trainable=is_training,
reuse=tf.AUTO_REUSE,
activation_fn=None, scope='cls_score_verbs')
cls_prob_hoi = tf.nn.sigmoid(cls_score_hoi, name='cls_prob_verbs')
self.predictions[nameprefix+"cls_score_hoi"] = cls_score_hoi
self.predictions[nameprefix+"cls_prob_hoi"] = cls_prob_hoi
if self.model_name.__contains__("VCOCO"):
# if self.model_name.__contains__('_CL_'):
# assert self.num_classes == 222
# print(cls_score_hoi, '=============================================')
if self.model_name.__contains__("VCL_V"):
self.predictions[nameprefix + "cls_prob_HO"] = cls_prob_hoi if nameprefix == '' else 0
else:
self.predictions[nameprefix+"cls_prob_HO"] = self.predictions["cls_prob_sp"] * cls_prob_hoi if nameprefix =='' else 0
return cls_prob_hoi
def get_compose_boxes(self, h_boxes, o_boxes):
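        # Computes the union (composite) box of each human/object box pair. Box rows appear to be
        # [batch_idx, x1, y1, x2, y2]: the element-wise min over the first three columns keeps the
        # batch index and the smaller top-left corner, and the element-wise max over the last two
        # columns keeps the larger bottom-right corner.
        # Illustration with hypothetical values: h = [0, 10, 20, 50, 60], o = [0, 30, 5, 80, 40]
        # gives cbox = [0, 10, 5, 80, 60].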
with tf.control_dependencies([tf.assert_equal(h_boxes[:, 0], o_boxes[:, 0],
data=[h_boxes[:, 0], o_boxes[:, 0]])]):
cboxes1 = tf.minimum(tf.slice(h_boxes, [0, 0], [-1, 3]),
tf.slice(o_boxes, [0, 0], [-1, 3]))
cboxes2 = tf.maximum(tf.slice(h_boxes, [0, 3], [-1, 2]),
tf.slice(o_boxes, [0, 3], [-1, 2]))
cboxes = tf.concat(values=[cboxes1, cboxes2], axis=1)
return cboxes
def verbs_loss(self, fc7_verbs, is_training, initializer, label='', ):
with tf.variable_scope('verbs_loss', reuse=tf.AUTO_REUSE):
cls_verbs = fc7_verbs
# cls_verbs = slim.fully_connected(cls_verbs, self.num_fc, scope='fc8_cls_verbs')
# cls_verbs = slim.dropout(cls_verbs, keep_prob=0.5, is_training=is_training, scope='dropout8_cls_verbs')
# fc9_verbs = slim.fully_connected(cls_verbs, self.num_fc, scope='fc9_cls_verbs')
# verbs_cls = slim.dropout(fc9_verbs, keep_prob=0.5, is_training=is_training, scope='dropout9_cls_verbs')
verbs_cls_score = slim.fully_connected(cls_verbs, self.verb_num_classes,
weights_initializer=initializer,
trainable=is_training,
reuse=tf.AUTO_REUSE,
activation_fn=None, scope='verbs_cls_score')
verb_cls_prob = tf.nn.sigmoid(verbs_cls_score, name='verb_cls_prob')
tf.reshape(verb_cls_prob, [-1, self.verb_num_classes])
self.predictions["verb_cls_score" + label] = verbs_cls_score
self.predictions["verb_cls_prob" + label] = verb_cls_prob
# We do not use this.
def objects_loss(self, input_feature, is_training, initializer, name='objects_loss', label='', is_stop_grads=False):
with tf.variable_scope(name):
print('objects_loss:', self.model_name)
if is_stop_grads:
input_feature = tf.stop_gradient(input_feature)
# cls_verbs = slim.fully_connected(cls_verbs, self.num_fc, scope='fc8_cls_verbs')
# cls_verbs = slim.dropout(cls_verbs, keep_prob=0.5, is_training=is_training, scope='dropout8_cls_verbs')
# fc9_verbs = slim.fully_connected(cls_verbs, self.num_fc, scope='fc9_cls_verbs')
# verbs_cls = slim.dropout(fc9_verbs, keep_prob=0.5, is_training=is_training, scope='dropout9_cls_verbs')
obj_cls_score = slim.fully_connected(input_feature, self.obj_num_classes,
weights_initializer=initializer,
trainable=is_training,
reuse=tf.AUTO_REUSE,
activation_fn=None, scope='obj_cls_score')
obj_cls_prob = tf.nn.sigmoid(obj_cls_score, name='obj_cls_prob')
tf.reshape(obj_cls_prob, [-1, self.obj_num_classes])
self.predictions["obj_cls_score" + label] = obj_cls_score
self.predictions["obj_cls_prob" + label] = obj_cls_prob
def build_network(self, is_training):
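        # High-level flow (as implemented below): ResNet backbone (image_to_head) plus spatial
        # pattern features (sp_to_head); crop pooled features for the human, object and union
        # boxes; run them through res5 / res5_ho; then feed the spatial branch (head_to_tail_sp +
        # region_classification_sp) and the verb-object branch (head_to_tail_ho +
        # region_classification_ho). When the model name contains 'gan', the Fabricator augments
        # the object/verb features before the verb-object classifier.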
initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)
num_stop = tf.cast(self.get_num_stop(), tf.int32)
# ResNet Backbone
head = self.image_to_head(is_training)
sp = self.sp_to_head()
cboxes = self.get_compose_boxes(self.H_boxes[:num_stop] if self.model_name.__contains__('VCOCO') else self.H_boxes, self.O_boxes)
pool5_O = self.crop_pool_layer(head, self.O_boxes, 'Crop_O')
pool5_H = self.crop_pool_layer(head, self.H_boxes, 'Crop_H')
cboxes = cboxes[:num_stop]
pool5_HO = self.extract_pool5_HO(head, cboxes, is_training, pool5_O, None, name='ho_')
# further resnet feature
fc7_H_raw, fc7_O_raw = self.res5(pool5_H, pool5_O, None, is_training, 'res5')
fc7_H = tf.reduce_mean(fc7_H_raw, axis=[1, 2])
fc7_O = tf.reduce_mean(fc7_O_raw, axis=[1, 2])
fc7_H_pos = fc7_H[:num_stop]
fc7_O_pos = fc7_O[:num_stop]
fc7_HO_raw = self.res5_ho(pool5_HO, is_training, 'res5')
fc7_HO = None if fc7_HO_raw is None else tf.reduce_mean(fc7_HO_raw, axis=[1, 2])
if not is_training:
# add visualization for test
self.add_visual_for_test(fc7_HO_raw, fc7_H_raw, fc7_O_raw, head, is_training, pool5_O)
fc7_verbs_raw = fc7_HO_raw
fc7_verbs = fc7_HO
self.score_summaries.update({'orth_HO': fc7_HO,
'orth_H': fc7_H, 'orth_O': fc7_O})
if self.model_name.__contains__('_orig_'):
print('ICAN original code')
# Phi
head_phi = slim.conv2d(head, 512, [1, 1], scope='head_phi')
# g
head_g = slim.conv2d(head, 512, [1, 1], scope='head_g')
Att_H = self.attention_pool_layer_H(head_phi, fc7_H, is_training, 'Att_H')
Att_H = self.attention_norm_H(Att_H, 'Norm_Att_H')
att_head_H = tf.multiply(head_g, Att_H)
Att_O = self.attention_pool_layer_O(head_phi, fc7_O_pos, is_training, 'Att_O')
Att_O = self.attention_norm_O(Att_O, 'Norm_Att_O')
att_head_O = tf.multiply(head_g, Att_O)
pool5_SH = self.bottleneck(att_head_H, is_training, 'bottleneck', False)
pool5_SO = self.bottleneck(att_head_O, is_training, 'bottleneck', True)
fc7_SH, fc7_SO, fc7_SHsp = self.head_to_tail(fc7_H, fc7_O_pos, pool5_SH, pool5_SO, sp, is_training, 'fc_HO')
cls_prob_H, cls_prob_O, cls_prob_sp = self.region_classification(fc7_SH, fc7_SO, fc7_SHsp, is_training,
initializer, 'classification')
elif not self.model_name.startswith('_V_'):
print('sp', sp)
fc7_SHsp = self.head_to_tail_sp(fc7_H, fc7_O, sp, is_training, 'fc_HO')
cls_prob_sp = self.region_classification_sp(fc7_SHsp, is_training, initializer, 'classification')
print("sp:", fc7_SHsp)
else:
fc7_SHsp = self.head_to_tail_sp(fc7_H, fc7_O, sp, is_training, 'fc_HO')
cls_prob_sp = self.region_classification_sp(fc7_SHsp, is_training, initializer, 'classification')
self.additional_loss(fc7_O, fc7_H_pos, fc7_verbs, fc7_verbs_raw, initializer, is_training)
print('verbs')
if not is_training:
self.test_visualize['fc7_O_feats'] = fc7_O
self.test_visualize['fc7_verbs_feats'] = fc7_verbs
self.test_visualize['fc7_H_feats'] = fc7_H_pos
self.intermediate['fc7_O'] = fc7_O[:num_stop]
self.intermediate['fc7_verbs'] = fc7_verbs[:num_stop]
if is_training and self.model_name.__contains__('gan'):
            # If model_name contains 'gan', we will use the fabricator.
            # Here, 'gan' does not mean that we use a generative adversarial network.
            # We originally planned to use a GAN, but it turned out to be of no use,
            # possibly because it is too difficult to tune the network with a GAN.
gt_class = self.gt_class_HO[:num_stop]
tmp_fc7_O = fc7_O[:num_stop]
tmp_fc7_verbs = fc7_verbs[:num_stop]
tmp_O_raw = fc7_O_raw[:num_stop]
if self.model_name.__contains__('batch') and self.model_name.__contains__('atl'):
tmp_O_raw = fc7_O[:num_stop]
tmp_gt_class = gt_class
# remove object list
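                # semi_filter appears to keep only rows whose human box has non-zero coordinates
                # (the sum over H_boxes[:, 1:] is non-zero), i.e. it drops placeholder entries
                # before computing the fabricator inputs.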
semi_filter = tf.reduce_sum(self.H_boxes[:tf.shape(tmp_fc7_O)[0], 1:], axis=-1)
semi_filter = tf.cast(semi_filter, tf.bool)
gt_class = tf.boolean_mask(gt_class, semi_filter, axis=0)
tmp_fc7_O = tf.boolean_mask(tmp_fc7_O, semi_filter, axis=0)
tmp_fc7_verbs = tf.boolean_mask(tmp_fc7_verbs, semi_filter, axis=0)
fc7_O, fc7_verbs = self.feature_gen.fabricate_model(tmp_fc7_O, tmp_O_raw,
tmp_fc7_verbs, fc7_verbs_raw[:num_stop], initializer, is_training,
gt_class)
# if self.model_name.__contains__('laobj'):
# # this aims to evaluate the effect of regularizing fabricated object features, we do not use.
# all_fc7_O = fc7_O
# tmp_class = self.get_hoi_labels()
# self.gt_obj_class = tf.cast(
# tf.matmul(tmp_class, self.obj_to_HO_matrix, transpose_b=True) > 0,
# tf.float32)
# self.objects_loss(all_fc7_O, is_training, initializer, 'objects_loss', label='_o')
# pass
else:
if 'FEATS' in os.environ and self.model_name.__contains__(
'gan'):
# This is only for visualization
gt_class = self.gt_class_HO if not self.model_name.__contains__(
'VCOCO') else self.gt_compose[:num_stop]
old_fc7_O = fc7_O
fc7_O, fc7_verbs = self.feature_gen.fabricate_model(fc7_O, None,
fc7_verbs, fc7_verbs, initializer,
True,
gt_class)
with tf.device("/cpu:0"): fc7_O = tf.Print(fc7_O, [tf.shape(fc7_O), num_stop, tf.shape(self.H_boxes), tf.shape(old_fc7_O), ],
'after gan:', first_n=100, summarize=10000)
if self.model_name.__contains__('varv'):
self.test_visualize['fc7_fake_O_feats'] = fc7_verbs[tf.shape(old_fc7_O)[0]:]
else:
self.test_visualize['fc7_fake_O_feats'] = fc7_O[tf.shape(old_fc7_O)[0]:]
pass
fc7_O = fc7_O[:num_stop]
fc7_verbs = fc7_verbs[:num_stop]
fc7_vo = self.head_to_tail_ho(fc7_O, fc7_verbs, fc7_O_raw, fc7_verbs_raw, is_training, 'fc_HO')
cls_prob_verbs = self.region_classification_ho(fc7_vo, is_training, initializer, 'classification')
if self.model_name.__contains__('_l0_') or self.model_name.__contains__('_scale_'):
"""
This is for factorized model.
"""
verb_prob = self.predictions['verb_cls_prob']
obj_prob = self.predictions["obj_cls_prob_o"]
print(verb_prob, obj_prob)
tmp_fc7_O_vectors = tf.cast(
tf.matmul(obj_prob, self.obj_to_HO_matrix) > 0,
tf.float32)
tmp_fc7_verbs_vectors = tf.cast(
tf.matmul(verb_prob, self.verb_to_HO_matrix) > 0,
tf.float32)
if 'cls_prob_verbs' not in self.predictions:
self.predictions['cls_prob_verbs'] = 0
if self.model_name.__contains__('_l0_'):
self.predictions['cls_prob_verbs'] = 0
self.predictions['cls_prob_verbs'] += (tmp_fc7_O_vectors + tmp_fc7_verbs_vectors)
self.score_summaries.update(self.predictions)
def get_hoi_labels(self):
if self.gt_class_HO_for_D_verbs is not None:
            # the labels might have been changed in the Fabricator
return self.gt_class_HO_for_D_verbs
else:
if self.model_name.__contains__('VCOCO') and self.model_name.__contains__('CL'):
return self.gt_compose
return self.gt_class_HO
def add_visual_for_test(self, fc7_HO_raw, fc7_H_raw, fc7_O_raw, head, is_training, pool5_O):
self.test_visualize['fc7_H_raw'] = tf.expand_dims(tf.reduce_mean(fc7_H_raw, axis=-1), axis=-1)
self.test_visualize['fc7_O_raw'] = tf.expand_dims(tf.reduce_mean(fc7_O_raw, axis=-1), axis=-1)
if fc7_HO_raw is not None:
self.test_visualize['fc7_HO_raw'] = tf.expand_dims(tf.reduce_mean(fc7_HO_raw, axis=-1), axis=-1)
self.test_visualize['fc7_H_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_H_raw, 0), tf.float32))
self.test_visualize['fc7_O_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_O_raw, 0), tf.float32))
if fc7_HO_raw is not None:
self.test_visualize['fc7_HO_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(fc7_HO_raw, 0), tf.float32))
res5_ho_h = self.res5_ho(self.extract_pool5_HO(head, self.H_boxes, is_training, pool5_O, None), is_training,
'h')
if self.model_name.__contains__('humans'):
res5_ho_o = self.crop_pool_layer(head, self.O_boxes, 'Crop_HO_h')
else:
res5_ho_o = self.res5_ho(self.extract_pool5_HO(head, self.O_boxes, is_training, pool5_O, None), is_training,
'o')
print("res5_ho_o", res5_ho_o, res5_ho_h)
if res5_ho_h is not None and res5_ho_o is not None:
self.test_visualize['res5_ho_H'] = tf.expand_dims(tf.reduce_mean(res5_ho_h, axis=-1), axis=-1)
self.test_visualize['res5_ho_O'] = tf.expand_dims(tf.reduce_mean(res5_ho_o, axis=-1), axis=-1)
self.test_visualize['res5_ho_H_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(res5_ho_h, 0), tf.float32))
self.test_visualize['res5_ho_O_acts_num'] = tf.reduce_sum(tf.cast(tf.greater(res5_ho_o, 0), tf.float32))
def add_pattern(self, name = 'pattern'):
with tf.variable_scope(name) as scope:
with tf.variable_scope(self.scope, self.scope):
conv1_sp = slim.conv2d(self.spatial[:, :, :, 0:2][:self.get_num_stop()], 64, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv1_sp')
pool1_sp = slim.max_pool2d(conv1_sp, [2, 2], scope='pool1_sp')
conv2_sp = slim.conv2d(pool1_sp, 32, [5, 5], reuse=tf.AUTO_REUSE, padding='VALID', scope='conv2_sp')
pool2_sp = slim.max_pool2d(conv2_sp, [2, 2], scope='pool2_sp')
pool2_flat_sp = slim.flatten(pool2_sp)
return pool2_flat_sp
def additional_loss(self, fc7_O, fc7_H, fc7_verbs, fc7_verbs_raw, initializer, is_training):
if self.model_name.__contains__('_vloss'):
self.verbs_loss(fc7_verbs, is_training, initializer)
if self.model_name.__contains__('_objloss'):
self.objects_loss(fc7_O, is_training, initializer, 'objects_loss', label='_o')
def get_num_stop(self):
"""
        Following iCAN, the spatial-pattern branch includes all negative samples, while the
        verb-object branch only uses positive samples. self.H_num is the partition point between
        positive and negative samples.
        :return:
"""
if self.model_name.__contains__('batch'):
            # This is for the batch style, i.e. there are multiple images in each batch.
return self.H_num
num_stop = tf.shape(self.H_boxes)[0] # for selecting the positive items
if self.model_name.__contains__('_new'):
print('new Add H_num constrains')
num_stop = self.H_num
        elif self.model_name.__contains__('_x5new'):  # contains some negative items
            # I use this strategy because I found by accident that including
            # some negative samples among the positive samples can improve the performance a bit (about 0.2%).
            # TODO I think there might be a better solution.
            # "No-Frills Human-Object Interaction Detection" provides some support for this.
            # I think VCL does not depend on this. If someone finds this has an important impact
            # on the results, feel free to contact me.
H_num_tmp = tf.cast(self.H_num, tf.int32)
num_stop = tf.cast(num_stop, tf.int32)
num_stop = H_num_tmp + tf.cast((num_stop - H_num_tmp) // 8, tf.int32)
else:
num_stop = self.H_num
return num_stop
def get_compose_num_stop(self):
num_stop = self.get_num_stop()
return num_stop
def extract_pool5_HO(self, head, cboxes, is_training, pool5_O, head_mask = None, name=''):
if self.model_name.__contains__('_union'):
pool5_HO = self.crop_pool_layer(head, cboxes, name + 'Crop_HO')
self.test_visualize["pool5_HO"] = tf.expand_dims(tf.reduce_mean(pool5_HO, axis=-1), axis=-1)
elif self.model_name.__contains__('_humans'):
print("humans")
pool5_HO = self.crop_pool_layer(head, self.H_boxes[:self.get_num_stop()],name + 'Crop_HO_h')
self.test_visualize["pool5_HO"] = tf.expand_dims(tf.reduce_mean(pool5_HO, axis=-1), axis=-1)
else:
# pool5_HO = self.crop_pool_layer(head, cboxes, 'Crop_HO')
pool5_HO = None
print("{} doesn\'t support pool5_HO".format(self.model_name))
return pool5_HO
def add_loss(self):
import math
with tf.variable_scope('LOSS') as scope:
num_stop = self.get_num_stop()
if self.model_name.__contains__('_VCOCO'):
label_H = self.gt_class_H
label_HO = self.gt_class_HO
label_sp = self.gt_class_sp
if self.model_name.__contains__('_CL'):
label_H = self.gt_compose
label_HO = self.gt_compose
label_sp = self.gt_compose
else:
label_H = self.gt_class_HO[:num_stop]
# label_HO = self.gt_class_HO_for_verbs
label_HO = self.gt_class_HO[:num_stop]
label_sp = self.gt_class_HO
if "cls_score_H" in self.predictions:
cls_score_H = self.predictions["cls_score_H"]
"""
                The re-weighting strategy has an important effect on the performance.
                It also largely improves our baseline in both the common and the zero-shot setting.
                We copy this strategy from TIN.
"""
if self.model_name.__contains__('_rew'):
cls_score_H = tf.multiply(cls_score_H, self.HO_weight)
H_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_H,
logits=cls_score_H[:num_stop, :]))
self.losses['H_cross_entropy'] = H_cross_entropy
if "cls_score_O" in self.predictions:
cls_score_O = self.predictions["cls_score_O"]
if self.model_name.__contains__('_rew'):
cls_score_O = tf.multiply(cls_score_O, self.HO_weight)
O_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_HO,
logits=cls_score_O[:num_stop, :]))
self.losses['O_cross_entropy'] = O_cross_entropy
if "cls_score_sp" in self.predictions:
cls_score_sp = self.predictions["cls_score_sp"]
if self.model_name.__contains__('_rew'):
cls_score_sp = tf.multiply(cls_score_sp, self.HO_weight)
elif self.model_name.__contains__('_xrew'):
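                    # '_xrew' derives the weights from the class frequencies instead:
                    # np.log(1 / (n_c / sum(n))) == log(sum(n) / n_c), so rarer classes get larger weights.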
reweights = np.log(1 / (self.num_inst_all / np.sum(self.num_inst_all)))
cls_score_sp = tf.multiply(cls_score_sp, reweights)
print(label_sp, cls_score_sp)
sp_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_sp, logits=cls_score_sp))
self.losses['sp_cross_entropy'] = sp_cross_entropy
if self.model_name.startswith('_V_'):
cls_score_hoi = self.predictions["cls_score_hoi"]
if self.model_name.__contains__('_rew'):
cls_score_hoi = tf.multiply(cls_score_hoi, self.HO_weight)
hoi_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_HO[:num_stop, :], logits=cls_score_hoi[:num_stop, :]))
self.losses['hoi_cross_entropy'] = hoi_cross_entropy
loss = hoi_cross_entropy
elif self.model_name.__contains__('_fac_'):
# factorized
gt_verb_label = self.gt_verb_class[:num_stop, :]
gt_obj_label = self.gt_obj_class[:num_stop, :]
# label_verb = tf.matmul()
cls_score_verbs = self.predictions["cls_score_verbs_f"][:num_stop, :]
cls_score_objs = self.predictions["cls_score_objs"][:num_stop, :]
hoi_cross_entropy = self.add_factorized_hoi_loss(cls_score_objs, cls_score_verbs, gt_obj_label,
gt_verb_label)
# result = tf.equal(tf.cast(tmp_verb_prob * gt_verb_label > 0.5, tf.float32),
# tf.cast(gt_verb_label, tf.float32))
# print('res', result)
# tmp_hoi_loss = tf.Print(tmp_hoi_loss, [tf.shape(result)], 'HOI acc:')
self.losses['verbs_cross_entropy'] = hoi_cross_entropy
# self.losses["pos_hoi_cross_entropy"] = tf.reduce_mean(
# tf.reduce_sum(tmp_verb_loss * gt_verb_label, axis=-1) / tf.reduce_sum(gt_verb_label, axis=-1))
# self.losses["pos_sp_cross_entropy"] = tf.reduce_mean(
# tf.reduce_sum(tmp_sp_cross_entropy * label_sp, axis=-1) / tf.reduce_sum(label_sp, axis=-1))
lamb = self.get_lamb_1()
if "cls_score_sp" not in self.predictions:
sp_cross_entropy = 0
self.losses['sp_cross_entropy'] = 0
loss = sp_cross_entropy + hoi_cross_entropy * lamb
elif self.model_name.startswith('VCL_') or self.model_name.startswith('FCL_') \
or self.model_name.startswith('ATL_'):
tmp_label_HO = self.get_hoi_labels()[:num_stop]
cls_score_hoi = self.predictions["cls_score_hoi"][:num_stop, :]
if self.model_name.__contains__('_rew'):
cls_score_hoi = tf.multiply(cls_score_hoi, self.HO_weight)
elif self.model_name.__contains__('_xrew'):
reweights = np.log(1 / (self.num_inst / np.sum(self.num_inst)))
# print(reweights, self.HO_weight, self.num_inst_all, self.num_inst)
# import ipdb;ipdb.set_trace()
cls_score_hoi = tf.multiply(cls_score_hoi, reweights)
if self.model_name.__contains__('batch') and self.model_name.__contains__('semi'):
semi_filter = tf.reduce_sum(self.H_boxes[:tf.shape(cls_score_hoi)[0], 1:], axis=-1)
semi_filter = tf.cast(semi_filter, tf.bool)
tmp_label_HO = tf.boolean_mask(tmp_label_HO, semi_filter, axis=0)
cls_score_hoi = tf.boolean_mask(cls_score_hoi, semi_filter, axis=0)
tmp_hoi_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tmp_label_HO, logits=cls_score_hoi)
hoi_cross_entropy = tf.reduce_mean(tmp_hoi_loss)
self.losses['hoi_cross_entropy'] = hoi_cross_entropy
lamb = self.get_lamb_1()
if "cls_score_sp" not in self.predictions:
sp_cross_entropy = 0
self.losses['sp_cross_entropy'] = 0
loss = sp_cross_entropy + hoi_cross_entropy * lamb
if self.model_name.__contains__('_orig_'):
loss = loss + O_cross_entropy + H_cross_entropy
print('Add all loss')
if 'fake_G_cls_score_hoi' in self.predictions:
fake_cls_score_verbs = self.predictions["fake_G_cls_score_hoi"]
if self.model_name.__contains__('_rew_'):
fake_cls_score_verbs = tf.multiply(fake_cls_score_verbs, self.HO_weight)
elif self.model_name.__contains__('_rew2'):
fake_cls_score_verbs = tf.multiply(fake_cls_score_verbs, self.HO_weight / 10)
elif self.model_name.__contains__('_rew1'):
fake_cls_score_verbs = tf.multiply(fake_cls_score_verbs, self.HO_weight)
elif self.model_name.__contains__('rewn'):
pass
print(self.gt_class_HO_for_G_verbs, fake_cls_score_verbs, '======================================')
self.losses['fake_G_verbs_cross_entropy'] = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.gt_class_HO_for_G_verbs, logits=fake_cls_score_verbs))
if 'fake_G_total_loss' not in self.losses:
self.losses['fake_G_total_loss'] = 0
gll = 1.
self.losses['fake_G_total_loss'] += (self.losses['fake_G_verbs_cross_entropy'] * gll)
else:
loss = H_cross_entropy + O_cross_entropy + sp_cross_entropy
# verb loss
temp = self.add_verb_loss(num_stop)
loss += temp
if self.model_name.__contains__('_objloss'):
obj_cls_cross_entropy = self.add_objloss(num_stop)
print('add objloss')
loss += obj_cls_cross_entropy
self.losses['total_loss'] = loss
self.event_summaries.update(self.losses)
print(self.losses)
print(self.predictions)
return loss
def add_factorized_hoi_loss(self, cls_score_objs, cls_score_verbs, gt_obj_label, gt_verb_label):
# cls_score_verbs = tf.multiply(cls_score_verbs, self.HO_weight)
# cls_score_objs = tf.multiply(cls_score_objs, self.HO_weight)
# tmp_label_HO = tf.Print(tmp_label_HO, [tf.shape(tmp_label_HO), tf.shape(cls_score_verbs)],'sdfsdfsdf')
# print('=======', tmp_label_HO, cls_score_verbs)
if self.model_name.__contains__('batch') and self.model_name.__contains__('semi'):
semi_filter = tf.reduce_sum(self.H_boxes[:tf.shape(cls_score_verbs)[0], 1:], axis=-1)
semi_filter = tf.cast(semi_filter, tf.bool)
gt_verb_label = tf.boolean_mask(gt_verb_label, semi_filter, axis=0)
gt_obj_label = tf.boolean_mask(gt_obj_label, semi_filter, axis=0)
cls_score_verbs = tf.boolean_mask(cls_score_verbs, semi_filter, axis=0)
cls_score_objs = tf.boolean_mask(cls_score_objs, semi_filter, axis=0)
tmp_verb_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=gt_verb_label, logits=cls_score_verbs)
tmp_obj_loss = tf.nn.sigmoid_cross_entropy_with_logits(
labels=gt_obj_label, logits=cls_score_objs)
hoi_cross_entropy = tf.reduce_mean(tmp_verb_loss) + tf.reduce_mean(tmp_obj_loss)
return hoi_cross_entropy
def get_lamb_1(self):
lamb = 1
if self.model_name.__contains__('_l05_'):
lamb = 0.5
elif self.model_name.__contains__('_l2_'):
lamb = 2
elif self.model_name.__contains__('_l0_'):
lamb = 0
elif self.model_name.__contains__('_l1_'):
lamb = 1
elif self.model_name.__contains__('_l15_'):
lamb = 1.5
elif self.model_name.__contains__('_l25_'):
lamb = 2.5
elif self.model_name.__contains__('_l3_'):
lamb = 3
elif self.model_name.__contains__('_l4_'):
lamb = 4
return lamb
def filter_loss(self, cls_score_sp, label_sp):
if self.model_name.__contains__('batch') and self.model_name.__contains__('semi'):
semi_filter = tf.reduce_sum(self.H_boxes[:tf.shape(cls_score_sp)[0], 1:], axis=-1)
# label_sp = tf.Print(label_sp, [tf.shape(semi_filter), semi_filter, self.H_boxes, tf.shape(label_sp)], 'batch debug0:', first_n=000, summarize=1000)
semi_filter = tf.cast(semi_filter, tf.bool)
# label_sp = tf.Print(label_sp, [tf.shape(semi_filter), semi_filter, tf.shape(label_sp)], 'batch debug:', first_n=000, summarize=1000)
label = tf.boolean_mask(label_sp, semi_filter, axis=0)
logits = tf.boolean_mask(cls_score_sp, semi_filter, axis=0)
# label = tf.Print(label, [tf.shape(semi_filter), tf.shape(label)], 'batch debug1:', first_n=000)
sp_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=logits))
else:
sp_cross_entropy = tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label_sp, logits=cls_score_sp))
return sp_cross_entropy
def cal_loss_by_weights(self, cls_score, label, orig_weights):
sp_cross_entropy = tf.multiply(
tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=cls_score), orig_weights)
sp_cross_entropy = tf.reduce_mean(sp_cross_entropy)
return sp_cross_entropy
def obtain_cbl_weights(self, tmp_label_HO, weights):
# weights = tf.expand_dims(weights, 0)
weights = tf.tile(weights, [tf.shape(tmp_label_HO)[0], 1]) * tmp_label_HO
weights = tf.reduce_sum(weights, axis=1)
weights = tf.expand_dims(weights, 1)
weights = tf.tile(weights, [1, self.num_classes])
return weights
def add_objloss(self, num_stop):
obj_cls_score = self.predictions["obj_cls_score_o"]
if self.model_name.__contains__('_ce'):
obj_cls_cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
labels=self.gt_obj_class[:num_stop], logits=obj_cls_score[:num_stop, :]))
else:
label_obj = tf.cast(
tf.matmul(self.get_hoi_labels(), self.obj_to_HO_matrix, transpose_b=True) > 0,
tf.float32)
obj_cls_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=label_obj[:tf.shape(obj_cls_score)[0], :], logits=obj_cls_score))
self.losses["obj_cls_cross_entropy_o"] = obj_cls_cross_entropy
model_name = self.model_name
if model_name.__contains__('_pobjloss'):
model_name = model_name.replace("_pobjloss", '_objloss')
lambda1 = 0.1
if model_name.__contains__('_objloss10'):
lambda1 = 1.0
elif self.model_name.__contains__('_objloss20'):
lambda1 = 2.0
elif model_name.__contains__('_objloss1'):
lambda1 = 0.5
elif model_name.__contains__('_objloss2'):
lambda1 = 0.3
elif model_name.__contains__('_objloss3'):
lambda1 = 0.08
elif model_name.__contains__('_objloss4'):
lambda1 = 0.05
temp = (obj_cls_cross_entropy * lambda1)
return temp
def add_verb_loss(self, num_stop):
temp = 0
if 'verb_cls_score' in self.predictions:
vloss_num_stop = num_stop
verb_cls_score = self.predictions["verb_cls_score"]
verb_cls_cross_entropy = self.filter_loss(verb_cls_score[:vloss_num_stop, :],
self.gt_verb_class[:vloss_num_stop])
self.losses["verb_cls_cross_entropy"] = verb_cls_cross_entropy
if 'verb_cls_score_gcn' in self.predictions:
verb_cls_score = self.predictions["verb_cls_score_gcn"]
verb_cls_cross_entropy1 = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.gt_verb_class[:vloss_num_stop], logits=verb_cls_score[:vloss_num_stop, :]))
self.losses["verb_cls_cross_entropy_gcn"] = verb_cls_cross_entropy1
verb_cls_cross_entropy += verb_cls_cross_entropy1
print('add vloss-------')
# neg 0.1, negv1 0.5 negv12 0.1 1
lambda1 = 0.1
if self.model_name.__contains__('vloss10'):
lambda1 = 1.0
elif self.model_name.__contains__('vloss20'):
lambda1 = 2.0
elif self.model_name.__contains__('vloss1'):
lambda1 = 0.5
elif self.model_name.__contains__('vloss2'):
lambda1 = 0.3
elif self.model_name.__contains__('vloss3'):
lambda1 = 0.08
elif self.model_name.__contains__('vloss4'):
lambda1 = 0.05
temp = (verb_cls_cross_entropy * lambda1)
if 'verb_cls_score_nvgcn_a' in self.predictions:
vloss_num_stop = num_stop
verb_cls_score = self.predictions["verb_cls_score_nvgcn_a"]
verb_cls_cross_entropy = self.filter_loss(verb_cls_score[:vloss_num_stop, :],
self.gt_verb_class[:vloss_num_stop])
self.losses["verb_cls_cross_entropy_nvgcn_a"] = verb_cls_cross_entropy
print('add vloss===========')
# neg 0.1, negv1 0.5 negv12 0.1 1
lambda1 = 0.1
if self.model_name.__contains__('_nvgcn_a10'):
lambda1 = 1.0
elif self.model_name.__contains__('_nvgcn_a1'):
lambda1 = 0.5
elif self.model_name.__contains__('_nvgcn_a2'):
lambda1 = 0.3
elif self.model_name.__contains__('_nvgcn_a3'):
lambda1 = 0.08
elif self.model_name.__contains__('_nvgcn_a4'):
lambda1 = 0.05
temp += (verb_cls_cross_entropy * lambda1)
return temp
def add_verb_ho_loss(self, num_stop):
vloss_num_stop = num_stop
verb_cls_score = self.predictions["verb_cls_score"]
verb_cls_cross_entropy = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
labels=self.gt_class_HO[:vloss_num_stop], logits=verb_cls_score[:vloss_num_stop, :]))
self.losses["verb_cls_cross_entropy"] = verb_cls_cross_entropy
print('add vloss')
# neg 0.1, negv1 0.5 negv12 0.1 1
lambda1 = 1
temp = (verb_cls_cross_entropy * lambda1)
return temp
def train_step(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, _ = sess.run([self.losses['total_loss'],
train_op],
feed_dict=feed_dict)
return loss
def train_step_with_summary(self, sess, blobs, lr, train_op):
feed_dict = self.get_feed_dict(blobs)
loss, summary, _ = sess.run([self.losses['total_loss'],
self.summary_op,
train_op],
feed_dict=feed_dict)
return loss, summary
| {"hexsha": "d2839f333507812c9e348e41708e8c483e638800", "size": 42229, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/networks/HOI.py", "max_stars_repo_name": "abreza/HOI-CL", "max_stars_repo_head_hexsha": "c5be517bb26eac73ef88a39d6ec9e564c3379714", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 40, "max_stars_repo_stars_event_min_datetime": "2021-04-09T17:53:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T02:38:10.000Z", "max_issues_repo_path": "lib/networks/HOI.py", "max_issues_repo_name": "abreza/HOI-CL", "max_issues_repo_head_hexsha": "c5be517bb26eac73ef88a39d6ec9e564c3379714", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2021-04-09T19:05:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-31T23:17:16.000Z", "max_forks_repo_path": "lib/networks/HOI.py", "max_forks_repo_name": "abreza/HOI-CL", "max_forks_repo_head_hexsha": "c5be517bb26eac73ef88a39d6ec9e564c3379714", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 8, "max_forks_repo_forks_event_min_datetime": "2021-05-30T12:37:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-14T03:13:57.000Z", "avg_line_length": 52.9185463659, "max_line_length": 161, "alphanum_fraction": 0.6024769708, "include": true, "reason": "import numpy", "num_tokens": 9974} |
/******************************************************************************
* Author(s): Christopher J. Havlicek
*
* See LICENSE and CONTACTS.
******************************************************************************/
#include "utility.hpp"
#include "contract.hpp"
#include "error/master.hpp"
#include "error/node_manip.hpp"
#include "io.hpp"
#include "kmap.hpp"
#include <boost/algorithm/string/case_conv.hpp>
#include <boost/filesystem.hpp>
#include <boost/uuid/random_generator.hpp>
#include <boost/uuid/string_generator.hpp>
#include <boost/uuid/uuid_io.hpp>
#include <emscripten.h>
#include <emscripten/val.h>
#include <openssl/md5.h>
#include <range/v3/action/sort.hpp>
#include <range/v3/action/transform.hpp>
#include <range/v3/algorithm/copy.hpp>
#include <range/v3/algorithm/copy.hpp>
#include <range/v3/algorithm/count_if.hpp>
#include <range/v3/algorithm/find.hpp>
#include <range/v3/algorithm/find_if.hpp>
#include <range/v3/algorithm/find_if_not.hpp>
#include <range/v3/algorithm/replace.hpp>
#include <range/v3/range/conversion.hpp>
#include <range/v3/view/drop.hpp>
#include <range/v3/view/enumerate.hpp>
#include <range/v3/view/filter.hpp>
#include <range/v3/view/indices.hpp>
#include <range/v3/view/join.hpp>
#include <range/v3/view/remove.hpp>
#include <range/v3/view/replace.hpp>
#include <range/v3/view/split.hpp>
#include <range/v3/view/take.hpp>
#include <range/v3/view/transform.hpp>
#include <zlib.h>
#include <algorithm>
#include <chrono>
#include <cstdlib>
#include <iterator>
#include <string>
#include <vector>
using namespace ranges;
namespace fs = boost::filesystem;
namespace kmap {
template<>
auto from_string( std::string const& s )
-> Result< bool >
{
auto rv = KMAP_MAKE_RESULT_EC( bool, error_code::common::conversion_failed );
if( s == "true" )
{
rv = true;
}
else if( s == "false" )
{
rv = false;
}
return rv;
}
auto compress_resource( std::byte const* data
, size_t const size )
-> std::vector< std::byte >
{
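    // zlib deflate flow: fill a z_stream with the input/output buffers, then
    // deflateInit() -> deflate(Z_FINISH) -> deflateEnd(). The output vector is sized to the
    // input size up front and shrunk to stream.total_out afterwards; any negative zlib return
    // code is turned into an exception below.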
auto rv = std::vector< std::byte >{};
rv.resize( size );
auto stream = [ & ]
{
auto zsm = z_stream{};
zsm.zalloc = Z_NULL;
zsm.zfree = Z_NULL;
zsm.opaque = Z_NULL;
zsm.avail_in = size;
zsm.next_in = reinterpret_cast< Bytef z_const* >( data );
zsm.avail_out = rv.size();
zsm.next_out = reinterpret_cast< Bytef* >( rv.data() );
return zsm;
}();
// TODO: Return failure instead of assertion.
auto const compression_level = 8; // According to the benchmarks I've seen, level 8 gives the best compression ratio for our use without being egregiously slow.
if( auto const succ = deflateInit( &stream, compression_level )
; succ < 0 )
{
KMAP_THROW_EXCEPTION_MSG( "deflateInit failed" );
}
if( auto const succ = deflate( &stream, Z_FINISH )
; succ < 0 )
{
KMAP_THROW_EXCEPTION_MSG( "deflate failed" );
}
if( auto const succ = deflateEnd( &stream )
; succ < 0 )
{
KMAP_THROW_EXCEPTION_MSG( "deflateEnd failed" );
}
rv.resize( stream.total_out );
rv.shrink_to_fit();
return rv;
}
auto configure_terminate()
-> void
{
std::set_terminate( []()
{
fmt::print( stderr
, "[terminate_handler]: std::terminate called!\n" );
auto eptr = std::current_exception();
if( eptr )
{
try
{
std::rethrow_exception( eptr );
}
catch( std::exception& e )
{
fmt::print( stderr
, "[terminate_handler]: std::terminate called: {}\n"
, e.what() );
abort();
}
}
else
{
fmt::print( "std::terminate called\n" );
abort();
}
} );
}
auto decompress_resource( std::byte const* data
, size_t const data_size
, size_t const out_size )
-> std::vector< std::byte >
{
auto rv = std::vector< std::byte >{};
rv.resize( out_size );
auto stream = [ & ]
{
auto zsm = z_stream{};
zsm.zalloc = Z_NULL;
zsm.zfree = Z_NULL;
zsm.opaque = Z_NULL;
zsm.avail_in = data_size;
zsm.next_in = reinterpret_cast< Bytef z_const* >( data );
zsm.avail_out = rv.size();
zsm.next_out = reinterpret_cast< Bytef* >( rv.data() );
return zsm;
}();
// TODO: Return failure instead of assertion.
assert( inflateInit( &stream ) >= 0 );
assert( inflate( &stream
, Z_NO_FLUSH ) >= 0 );
assert( inflateEnd( &stream ) >= 0 );
rv.resize( stream.total_out );
rv.shrink_to_fit();
return rv;
}
auto gen_uuid()
-> Uuid
{
auto rv = Uuid{};
BC_CONTRACT()
BC_POST([ & ]
{
BC_ASSERT( !rv.is_nil() );
})
;
using boost::uuids::random_generator;
rv = random_generator{}();
return rv;
}
// TODO: Use fstream instead?
auto gen_md5_uuid( FILE* fp )
-> Uuid
{
auto rv = Uuid{};
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( MD5_DIGEST_LENGTH == sizeof( Uuid::data ) );
})
BC_POST([ & ]
{
BC_ASSERT( !rv.is_nil() );
})
;
using std::array;
using InBuf = array< char
, 512 >;
auto inbuf = InBuf{};
auto ctx = MD5_CTX{};
auto bytes = fread( inbuf.data()
, 1
, 512
, fp );
MD5_Init( &ctx );
while( bytes > 0 )
{
MD5_Update( &ctx
, inbuf.data()
, bytes );
bytes = fread( inbuf.data()
, 1
, 512
, fp );
}
MD5_Final( rv.data
, &ctx );
return rv;
}
auto gen_md5_uuid( std::byte const* data
, size_t const size )
-> Uuid
{
auto rv = Uuid{};
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( MD5_DIGEST_LENGTH == sizeof( Uuid::data ) );
})
BC_POST([ & ]
{
BC_ASSERT( !rv.is_nil() );
})
;
auto const chunk_size = size_t{ 512 };
auto const dend = data + size;
auto next = [ chunk_size
, dend ]
( std::byte const* pos )
{
assert( pos <= dend );
if( pos == dend )
{
return dend;
}
else if( pos + chunk_size > dend )
{
return dend;
}
else
{
return std::next( pos
, chunk_size );
}
};
auto pos = data;
auto next_pos = next( pos );
auto count = distance( pos
, next_pos );
auto ctx = MD5_CTX{};
MD5_Init( &ctx );
while( count > 0 )
{
MD5_Update( &ctx
, pos
, count );
pos = next_pos;
next_pos = next( pos );
count = distance( pos
, next_pos );
}
MD5_Final( rv.data
, &ctx );
return rv;
}
auto gen_uuid_string()
-> std::string
{
return to_string( gen_uuid() );
}
// TODO: rename to to_uuid()?
auto uuid_from_string( std::string const& suuid )
-> Result< Uuid >
{
using boost::uuids::string_generator;
try
{
return string_generator{}( suuid );
}
catch( std::exception const& e )
{
io::print( "exception: {}\n", e.what() );
// std::cerr << e.what() << '\n'; TODO: Add to EC payload?
return KMAP_MAKE_ERROR( error_code::node::invalid_uuid );
}
}
auto gen_temp_db_name()
-> std::string
{
return ".tmp." + gen_uuid_string() + ".kmap";
}
auto to_ordering_id( Uuid const& id )
-> std::string
{
auto const sid = to_string( id );
return sid
| views::take( 8 )
| to< std::string >();
}
auto to_uuids( HeadingPath const& path
, Database& db
, Uuid const& root )
-> std::vector< Uuid >
{
auto rv = std::vector< Uuid >{};
auto parent = root;
for( auto const& e : path )
{
auto const ou = db.fetch_child( e
, parent );
if( !ou )
{
// TODO: report error.
assert( false );
}
rv.emplace_back( parent );
parent = *ou;
}
return rv;
}
auto leaf( UuidPath const& path )
-> Uuid
{
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( path.size() > 0 );
})
;
return path.back();
}
auto mid( std::vector< Uuid > const& ids )
-> Uuid
{
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( ids.size() > 0 );
})
;
using std::ceil;
auto const offset = static_cast< uint32_t >( ceil( ids.size() / 2.0 ) );
return ids[ offset - 1 ];
}
auto match_length( std::string const& s
, std::string const& to )
-> uint32_t
{
using std::min;
auto rv = uint32_t{};
for( auto const i : views::indices( min( s.size()
, to.size() ) ) )
{
if( s[ i ] == to[ i ] )
{
++rv;
}
else
{
break;
}
}
return rv;
}
auto is_valid_heading_char( char const c )
-> bool
{
if( std::isalnum( c ) )
{
if( std::isalpha( c ) )
{
if( std::islower( c ) )
{
return true;
}
}
else
{
return true;
}
}
else if( c == '_' )
{
return true;
}
return false;
}
// TODO: Replace with Boost.Spirit
auto fetch_first_invalid( Heading const& heading )
-> Optional< uint32_t >
{
auto rv = Optional< uint32_t >{};
BC_CONTRACT()
BC_POST([ & ]
{
if( rv )
{
BC_ASSERT( *rv < heading.size() );
}
})
;
auto const it = find_if_not( heading
, is_valid_heading_char );
if( it != heading.end() )
{
rv = distance( heading.begin()
, it );
}
return rv;
}
// TODO: Shouldn't constraints on Heading rather be expressed in the constructor
// of the Heading class? That way, any place a heading is passed, it is known to be
// in a good, valid state.
auto is_valid_heading( Heading const& heading )
-> bool
{
return !fetch_first_invalid( heading );
}
auto is_valid_heading_path( std::string const& path )
-> bool
{
auto is_valid = []( auto const c )
{
return is_valid_heading_char( c )
|| c == ',' // back
|| c == '.' // forward
|| c == '\'' // disambiguator
|| c == '/'; // root
};
return end( path ) == find_if_not( path
, is_valid );
}
auto format_title( Heading const& heading )
-> Title
{
BC_CONTRACT()
BC_POST([ & ]
{
BC_ASSERT( is_valid_heading( heading ) );
})
;
// TODO: Figure out how to do this all elegantly with one range statement.
auto rv = heading
| views::transform( []( auto const& e ){ return tolower( e ); } )
| views::split( '_' )
| views::transform( []( auto const& e ){ return to< std::string >( e ); } )
| to< StringVec >();
// Capitalize first of every word.
for( auto&& e : rv )
{
e[ 0 ] = toupper( e[ 0 ] );
}
return rv
| views::join( ' ' )
| to< Title >();
}
auto format_heading( Title const& title )
-> Heading
{
auto rv = Heading{};
BC_CONTRACT()
BC_POST([ & ]
{
BC_ASSERT( is_valid_heading( rv ) );
})
;
auto heading = title;
boost::to_lower( heading );
for( auto const [ index, c ] : views::enumerate( heading ) )
{
if( !is_valid_heading_char( c ) )
{
heading[ index ] = '_';
}
}
rv = heading;
return rv;
}
auto flatten( HeadingPath const& path )
-> Heading
{
return path
| views::join( '.' )
| to< Heading >();
}
auto to_string( Color const& c )
-> std::string
{
switch( c )
{
case Color::white: return "white";
case Color::black: return "black";
case Color::red: return "red";
case Color::orange: return "orange";
case Color::yellow: return "yellow";
case Color::green: return "green";
case Color::blue: return "blue";
case Color::indigo: return "indigo";
case Color::violet: return "violet";
default: assert( false );
}
return ""; // Avoid compiler warning.
}
auto to_string( bool const b )
-> std::string
{
if( !b )
{
return "false";
}
else
{
return "true";
}
}
auto to_uint64( std::string const& s
, int const base )
-> Result< uint64_t >
{
static_assert( sizeof( decltype( std::stoull( "", 0, base ) ) ) == sizeof( uint64_t ) );
auto rv = KMAP_MAKE_RESULT( uint64_t );
BC_CONTRACT()
BC_POST([ & ]
{
if( rv )
{
// BC_ASSERT( to_string( rv.value() ) == s ); // TODO.
}
})
;
try
{
rv = std::stoull( s, 0, base );
}
catch( std::exception& e )
{
// TODO: propagate payload?
io::print( stderr
, "to_uint64 failed: {}"
, e.what() );
rv = KMAP_MAKE_ERROR( error_code::common::conversion_failed ); // TODO: numeric::conversion_failed?
}
return rv;
}
auto to_uint64( std::string const& s )
-> Result< uint64_t >
{
return to_uint64( s, 10 );
}
// TODO: 'id' should probably be an integral constant (C++20)
auto to_uint64( Uuid const& id )
-> Result< uint64_t >
{
auto rv = KMAP_MAKE_RESULT( uint64_t );
BC_CONTRACT()
BC_POST([ & ]
{
if( rv )
{
BC_ASSERT( to_uuid( rv.value() ) == id );
}
})
;
static_assert( sizeof( id.data ) == sizeof( uint64_t ) * 2 );
auto tv = uint64_t{};
std::copy( std::begin( id.data ) + sizeof( uint64_t ), std::end( id.data )
, reinterpret_cast< char* >( &tv ) );
    // Only the first 8 bytes are allowed to contain data.
KMAP_ENSURE( rv, tv == 0, error_code::common::conversion_failed );
std::copy( std::begin( id.data ), std::begin( id.data ) + sizeof( uint64_t )
, reinterpret_cast< char* >( &tv ) );
rv = tv;
return rv;
}
// TODO: 'id' should probably be an integral constant (C++20)
auto to_uuid( uint64_t const& id )
-> Uuid
{
auto rv = Uuid{};
BC_CONTRACT()
BC_POST([ & ]
{
//BC_ASSERT( to_uint64( rv ) == id ); TODO?
})
;
auto const* id_bs = reinterpret_cast< char const* >( &id );
auto const id_begin = id_bs;
auto const id_end = id_bs + sizeof( id );
std::copy( id_begin, id_end
, std::begin( rv.data ) );
return rv;
}
auto longest_common_prefix( StringVec const& ss )
-> std::string
{
if( ss.empty() )
{
return {};
}
auto const first = ss[ 0 ];
auto count = uint32_t{};
for( auto const& i : views::indices( first.size() ) )
{
auto common = true;
for( auto const& e : ss )
{
if( i >= e.size()
|| first[ i ] != e[ i ] )
{
common = false;
}
}
if( common )
{
count = i + 1;
}
else
{
break;
}
}
return first
| views::take( count )
| to< std::string >();
}
auto map_match_lengths( Heading const& from
, std::vector< Heading > const& to )
-> std::vector< std::pair< uint32_t
, Heading > >
{
auto map_len = [ from ]( std::string const& e )
-> std::pair< uint32_t
, std::string >
{
return { match_length( from
, e )
, e };
};
return to
| views::transform( map_len )
| to_vector;
}
auto match_closest( Heading const& unknown
, std::vector< Heading > const& knowns )
-> Heading
{
auto const matches = map_match_lengths( unknown
, knowns )
| actions::sort( []( auto const& lhs
, auto const& rhs ) { return lhs.first > rhs.first; } );
auto const out = [ & ]
{
auto const& ult = matches[ 0 ];
auto const& penult = matches[ 1 ];
if( ult.first == 0) // No match length.
{
}
if( ult.first == penult.first ) // Competing match length.
{
// TODO: what is the point of calling match_length here? Just use
// ult.first or penult.first.
auto const shared = match_length( ult.second
, penult.second );
auto const rv = ult.second | views::take( shared )
| to< std::string >();
return rv;
}
else if( unknown.size() == ult.second
.size() )
{
return unknown;
}
else
{
return ult.second;
}
}();
return out;
}
auto fetch_completions( std::string const& unknown
, StringVec const& knowns )
-> StringVec
{
auto const usize = distance( unknown );
auto filter = views::filter( [ & ]( auto const& e )
{
auto const sub = e
| views::take( usize )
| to< std::string >();
return unknown == sub;
} );
return knowns
| filter
| to< StringVec >();
}
auto markdown_to_html( std::string const& text )
-> std::string
{
using emscripten::val;
if( text.empty() )
{
return {};
}
auto v = val::global().call< val >( "convert_markdown_to_html"
, text );
if( !v.as< bool >() )
{
return "Error: markdown to html conversion failed";
}
else
{
return v.as< std::string >();
}
}
auto xor_ids( Uuid const& lhs
, Uuid const& rhs )
-> Uuid
{
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( lhs.size() == 16 );
BC_ASSERT( rhs.size() == 16 );
})
;
using std::vector;
auto v1 = vector< uint8_t >{};
auto v2 = vector< uint8_t >{};
auto vc = vector< uint8_t >{};
v1.resize( lhs.size() );
v2.resize( rhs.size() );
vc.resize( lhs.size() );
copy( lhs.begin()
, lhs.end()
, v1.begin() );
copy( rhs.begin()
, rhs.end()
, v2.begin() );
for( auto const i : views::indices( lhs.size() ) )
{
vc[ i ] = v1[ i ] ^ v2[ i ];
}
auto rv = Uuid{};
copy( vc.begin()
, vc.end()
, rv.begin() );
return rv;
}
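// Note: since XOR is an involution, alias_source( dst, make_alias_id( src, dst ) ) == src,
// i.e. the alias id encodes the source node relative to its destination/parent.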
auto make_alias_id( Uuid const& alias_src
, Uuid const& alias_dst )
-> Uuid
{
return xor_ids( alias_src
, alias_dst );
}
auto make_edge_id( Uuid const& from
, Uuid const& to )
-> Uuid
{
return xor_ids( from
, to );
}
auto alias_source( Uuid const& parent
, Uuid const& alias )
-> Uuid
{
return xor_ids( parent
, alias );
}
auto url_to_heading( std::string const url )
-> Heading
{
return url
| views::replace( '.'
, '_' )
| to< Heading >();
}
auto present_time()
-> uint64_t
{
using namespace std::chrono;
return time_point_cast< seconds >( system_clock::now() )
. time_since_epoch()
. count();
}
auto fetch_latest_state_path()
-> Optional< FsPath >
{
auto to_paths = views::transform( []( auto const& e ){ return e.path(); } );
auto filter_extension = views::filter( []( auto const& e ){ return e.extension() == ".kmap"; } );
auto sort_by_timestamp = actions::sort( []( auto const& lhs, auto const& rhs ){ return fs::last_write_time( lhs ) > fs::last_write_time( rhs ); } );
auto const di = fs::directory_iterator{ kmap_root_dir };
auto rv = di
| to_paths
| filter_extension
| to_vector
| sort_by_timestamp;
if( !rv.empty() )
{
return { rv.front() };
}
else
{
return nullopt;
}
}
// This function exists purely as a workaround for the fact that lstat does not work properly on MSYS when it is called on a non-existent file.
auto file_exists( FsPath const& p )
-> bool
{
#ifdef KMAP_MSYS
auto ec = boost::system::error_code{};
if( fs::exists( p
, ec ) )
{
return true;
}
else if( ec )
{
fmt::print( stderr
, "[Warning] `fs::exists()` failed. Note that lstat does not seem to work properly on MSYS: {}\n"
, ec.message() );
}
return false;
#else // Not MSYS
return fs::exists( p );
#endif // KMAP_MSYS
}
// WARNING,TODO: The current impl. does not properly merge all node attributes e.g., node bodies and aliases.
template< typename Stmts >
auto merge_trees_internal( Stmts& stmts
, Uuid const& src
, Uuid const& dst )
-> void
{
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( stmts.exists( src ) );
BC_ASSERT( stmts.exists( dst ) );
})
BC_POST([ & ]
{
BC_ASSERT( !stmts.exists( src ) );
})
;
auto const src_children = stmts.fetch_children( src );
auto const dst_children = stmts.fetch_children( dst );
auto const src_map = src_children
| views::transform( [ &stmts ]( auto const& e ){ return std::pair{ e, stmts.fetch_heading( e ).value() }; } )
| to_vector;
auto const dst_map = dst_children
| views::transform( [ &stmts ]( auto const& e ){ return std::pair{ e, stmts.fetch_heading( e ).value() }; } )
| to_vector;
for( auto const src_child : src_map )
{
if( auto const it = find_if( dst_map
, [ & ]( auto const& e ){ return src_child.second == e.second; } )
; it != end( dst_map ) )
{
// TODO: Merge aliases and bodies (or at least warn or fail if they are encountered).
merge_trees_internal( stmts
, src_child.first
, it->first );
}
else
{
stmts.move_node( src_child.first
, dst );
}
}
stmts.delete_node( src );
}
auto merge_trees( Kmap& kmap
, Uuid const& src
, Uuid const& dst )
-> void
{
merge_trees_internal( kmap
, src
, dst );
}
auto merge_trees( StatementPreparer& stmts
, Uuid const& src
, Uuid const& dst )
-> void
{
merge_trees_internal( stmts
, src
, dst );
}
auto merge_ranges( std::set< uint64_t > const& values )
-> std::set< std::pair< uint64_t, uint64_t > >
{
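    // Collapses a sorted set of integers into inclusive [first, second] ranges, e.g.
    // {1, 2, 3, 7, 8, 10} -> {{1, 3}, {7, 8}, {10, 10}}.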
auto rv = std::set< std::pair< uint64_t, uint64_t > >{};
if( values.empty() )
{
return rv;
}
auto first = uint64_t{ *values.begin() };
auto second = first;
for( uint64_t const e : values | views::drop( 1 ) )
{
if( ( second + 1 ) != e )
{
io::print( "[{:#x},{:#x}], {:#x}\n", first, second, e );
rv.emplace( std::pair{ first, second } );
first = e;
}
second = e;
}
rv.emplace( std::pair{ first, second } );
return rv;
}
auto print_stacktrace()
-> void
{
EM_ASM(
{
console.log( stackTrace() );
} );
}
auto copy_body( Kmap& kmap
, Uuid const& src
, Uuid const& dst )
-> bool
{
auto rv = false;
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( kmap.fetch_body( src ) );
})
BC_POST([ &
, prev_src_body = kmap.fetch_body( src ) ]
{
if( rv )
{
auto const src_body = kmap.fetch_body( src );
auto const dst_body = kmap.fetch_body( dst );
            BC_ASSERT( src_body
                    && dst_body
                    && src_body.value() == dst_body.value() );
BC_ASSERT( dst_body
&& prev_src_body
&& dst_body.value() == prev_src_body.value() );
}
})
;
if( auto const src_body = kmap.fetch_body( src )
; src_body )
{
kmap.update_body( dst
, src_body.value() );
rv = true;
}
return rv;
}
auto move_body( Kmap& kmap
, Uuid const& src
, Uuid const& dst )
-> bool
{
auto rv = false;
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( kmap.fetch_body( src ) );
})
BC_POST([ &
, prev_src_body = kmap.fetch_body( src ) ]
{
if( rv )
{
auto const src_body = kmap.fetch_body( src );
auto const dst_body = kmap.fetch_body( dst );
BC_ASSERT( src_body && src_body.value().empty() );
BC_ASSERT( dst_body
&& prev_src_body
&& dst_body.value() == prev_src_body.value() );
}
})
;
if( copy_body( kmap
, src
, dst ) )
{
kmap.update_body( src
, "" );
rv = true;
}
return rv;
}
auto select_median_range( std::vector< Uuid > const& range
, Uuid const& median
, uint32_t const& max_radius )
-> std::vector< Uuid >
{
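    // Returns a window of at most 2 * max_radius consecutive elements of `range` around `median`
    // (median included), clamped to the ends of the range.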
auto rv = std::vector< Uuid >{};
BC_CONTRACT()
BC_PRE([ & ]
{
BC_ASSERT( find( range, median ) != range.end() );
})
BC_POST([ & ]
{
BC_ASSERT( rv.size() <= ( max_radius * 2 ) );
})
;
auto med_it = find( range, median );
BC_ASSERT( med_it != range.end() );
auto const bit = [ & ]
{
if( std::distance( range.begin(), med_it ) < max_radius )
{
return range.begin();
}
else
{
return std::prev( med_it, max_radius );
}
}();
auto const eit = [ & ]
{
if( std::distance( med_it, range.end() ) < max_radius )
{
return range.end();
}
else
{
return std::next( med_it, max_radius );
}
}();
rv = std::vector< Uuid >{ bit, eit };
return rv;
}
auto select_median_range( std::vector< Uuid > const& range
, uint32_t const& max_radius )
-> UuidVec
{
if( range.empty() )
{
return {};
}
return select_median_range( range
, range[ range.size() / 2 ]
, max_radius );
}
auto fetch_siblings( Kmap const& kmap
, Uuid const& id )
-> UuidSet
{
auto rv = UuidSet{};
if( auto parent = kmap.fetch_parent( id )
; parent )
{
auto const siblings = [ & ]
{
auto const id_set = UuidSet{ id };
auto const children = kmap.fetch_children( parent.value() );
auto sibs = UuidSet{};
std::set_difference( children.begin(), children.end()
, id_set.begin(), id_set.end()
, std::inserter( sibs, sibs.end() ) );
return sibs;
}();
rv = siblings;
}
return rv;
}
auto fetch_siblings_ordered( Kmap const& kmap
, Uuid const& id )
-> UuidVec
{
auto rv = UuidVec{};
if( auto parent = kmap.fetch_parent( id )
; parent )
{
auto const siblings = [ & ]
{
auto const children = kmap.fetch_children_ordered( parent.value() );
return children
| views::remove( id )
| to_vector;
}();
rv = siblings;
}
return rv;
}
auto fetch_siblings_inclusive( Kmap const& kmap
, Uuid const& id )
-> UuidSet
{
return fetch_parent_children( kmap, id );
}
auto fetch_siblings_inclusive_ordered( Kmap const& kmap
, Uuid const& id )
-> UuidVec
{
return fetch_parent_children_ordered( kmap, id );
}
auto fetch_parent_children( Kmap const& kmap
, Uuid const& id )
-> UuidSet
{
auto rv = UuidSet{};
if( auto const parent = kmap.fetch_parent( id )
; parent )
{
rv = kmap.fetch_children( parent.value() );
}
return rv;
}
auto fetch_parent_children_ordered( Kmap const& kmap
, Uuid const& id )
-> UuidVec
{
auto rv = UuidVec{};
if( auto const parent = kmap.fetch_parent( id )
; parent )
{
rv = kmap.fetch_children_ordered( parent.value() );
}
return rv;
}
auto to_heading_path( Kmap const& kmap
, UuidVec const& lineage )
-> StringVec
{
return lineage
| views::transform( [ & ]( auto const& e ){ return kmap.fetch_heading( e ).value(); } )
| to< StringVec >();
}
auto to_heading_path_flat( Kmap const& kmap
, UuidVec const& lineage )
-> Heading
{
auto rv = lineage
| views::transform( [ & ]( auto const& e ){ return kmap.fetch_heading( e ).value(); } )
| to_vector;
return rv
| views::join( '.' )
| to< Heading >();
}
auto flatten( StringVec const& v
, char const c )
-> std::string
{
return v
| views::join( c )
| to< std::string >();
}
auto is_direct_descendant( Kmap const& kmap
, Uuid const& root
, Heading const& path )
-> bool
{
auto rv = false;
auto const lineage = path | views::split( '.' ) | to< StringVec >();
auto parent = root;
for( auto const& schild : lineage )
{
if( auto const child = kmap.fetch_child( parent, schild )
; child )
{
parent = child.value();
rv = true;
}
else
{
rv = false;
break;
}
}
return rv;
}
} // namespace kmap
| {"hexsha": "caec47889f6f6a510334cf8d7392765ceaf2502b", "size": 31463, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/utility.cpp", "max_stars_repo_name": "moralismercatus/kmap", "max_stars_repo_head_hexsha": "6887780c2fbe795f07a81808ef31f11dad4f5043", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-06-28T00:31:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-28T00:31:08.000Z", "max_issues_repo_path": "src/utility.cpp", "max_issues_repo_name": "moralismercatus/kmap", "max_issues_repo_head_hexsha": "6887780c2fbe795f07a81808ef31f11dad4f5043", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utility.cpp", "max_forks_repo_name": "moralismercatus/kmap", "max_forks_repo_head_hexsha": "6887780c2fbe795f07a81808ef31f11dad4f5043", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 22.3617626155, "max_line_length": 164, "alphanum_fraction": 0.477290786, "num_tokens": 7778} |
#include "context.hpp"
#include "service.hpp"
#include <asio_utp/socket.hpp>
#include <asio_utp/log.hpp>
#include <boost/asio/steady_timer.hpp>
#include <iostream>
using namespace std;
using namespace asio_utp;
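// Periodic 500ms tick driven by an asio steady_timer: start() arms the timer and re-arms it
// from the completion handler while _running is set; stop() clears the flag and cancels the
// outstanding wait. The shared_from_this() capture keeps the ticker alive while a wait is pending.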
struct context::ticker_type : public enable_shared_from_this<ticker_type> {
bool _running = false;
bool _outstanding = false;
asio::steady_timer _timer;
function<void()> _on_tick;
#if BOOST_VERSION >= 107000
ticker_type(asio::executor&& ex, function<void()> on_tick)
: _timer(move(ex))
, _on_tick(move(on_tick))
{
}
#else
ticker_type(asio::io_context::executor_type&& ex, function<void()> on_tick)
: _timer(ex.context())
, _on_tick(move(on_tick))
{
}
#endif
void start() {
if (_running) return;
_running = true;
if (_outstanding) return;
_timer.expires_after(chrono::milliseconds(500));
_outstanding = true;
_timer.async_wait([this, self = shared_from_this()]
(const sys::error_code& ec) {
_outstanding = false;
if (!_running) return;
_on_tick();
if (!_running) return;
_running = false;
start();
});
}
void stop() {
if (!_running) return;
_running = false;
_timer.cancel();
}
~ticker_type() {
stop();
}
};
uint64 context::callback_log(utp_callback_arguments* a)
{
log("LOG: ", a->socket, " ", a->buf);
return 0;
}
uint64 context::callback_sendto(utp_callback_arguments* a)
{
context* self = (context*) utp_context_get_userdata(a->context);
sys::error_code ec;
std::vector<asio::const_buffer> bufs { asio::buffer(a->buf, a->len) };
self->_multiplexer->send_to( bufs
, util::to_endpoint(*a->address)
, 0
, ec);
// The libutp library sometimes calls this function even after the last
// socket holding this context has received an EOF and closed.
// TODO: Should this be fixed in libutp?
if (ec && ec == asio::error::bad_descriptor) {
return 0;
}
if (ec && ec != asio::error::would_block) {
for (auto& s : self->_registered_sockets) {
s.close_with_error(ec);
}
}
return 0;
}
uint64 context::callback_on_error(utp_callback_arguments*)
{
return 0;
}
static const char* libutp_state_name(int state) {
switch(state) {
case UTP_STATE_CONNECT: return "UTP_STATE_CONNECT";
case UTP_STATE_WRITABLE: return "UTP_STATE_WRITABLE";
case UTP_STATE_EOF: return "UTP_STATE_EOF";
case UTP_STATE_DESTROYING: return "UTP_STATE_DESTROYING";
default: return "UNKNOWN";
}
}
uint64 context::callback_on_state_change(utp_callback_arguments* a)
{
    auto socket = (socket_impl*) utp_get_userdata(a->socket);
    if (!socket) {
        // The utp::socket_impl has detached from this utp_socket
        return 0;
    }
    auto* ctx = socket->_context.get();
    if (ctx->_debug) {
        log( ctx, " context::callback_on_state_change"
           , " socket:" ,socket
           , " new_state:" ,libutp_state_name(a->state));
    }
switch(a->state) {
case UTP_STATE_CONNECT:
socket->on_connect();
break;
case UTP_STATE_WRITABLE:
socket->on_writable();
break;
case UTP_STATE_EOF:
socket->on_eof();
break;
case UTP_STATE_DESTROYING:
socket->on_destroy();
break;
}
return 0;
}
uint64 context::callback_on_read(utp_callback_arguments* a)
{
auto socket = (socket_impl*) utp_get_userdata(a->socket);
assert(socket);
socket->on_receive(a->buf, a->len);
return 0;
}
uint64 context::callback_on_firewall(utp_callback_arguments* a)
{
auto* self = (context*) utp_context_get_userdata(a->context);
if (self->_accepting_sockets.empty()) {
return 1;
}
return 0;
}
uint64 context::callback_on_accept(utp_callback_arguments* a)
{
auto* self = (context*) utp_context_get_userdata(a->context);
if (self->_accepting_sockets.empty()) return 0;
auto& s = self->_accepting_sockets.front();
self->_accepting_sockets.pop_front();
s.on_accept(a->socket);
return 0;
}
context::context(shared_ptr<udp_multiplexer_impl> m)
: _multiplexer(std::move(m))
, _local_endpoint(_multiplexer->local_endpoint())
, _utp_ctx(utp_init(2 /* version */))
{
if (_debug) {
log(this, " context::context()");
}
// TODO: Throw?
assert(_utp_ctx);
_recv_handle.handler = [&] ( const sys::error_code& ec
, const endpoint_type& ep
, const uint8_t* data
, size_t size) {
return on_read(ec, ep, data, size);
};
_ticker = make_shared<ticker_type>(get_executor(), [this] {
assert(_utp_ctx);
if (!_utp_ctx) return;
if (_debug) {
log(this, " context on_tick");
}
utp_check_timeouts(_utp_ctx);
});
utp_context_set_userdata(_utp_ctx, this);
#if UTP_DEBUG_LOGGING
utp_set_callback(_utp_ctx, UTP_LOG, &callback_log);
//utp_context_set_option(_utp_ctx, UTP_LOG_MTU, 1);
utp_context_set_option(_utp_ctx, UTP_LOG_NORMAL, 1);
utp_context_set_option(_utp_ctx, UTP_LOG_DEBUG, 1);
#endif
utp_set_callback(_utp_ctx, UTP_SENDTO, &callback_sendto);
utp_set_callback(_utp_ctx, UTP_ON_ERROR, &callback_on_error);
utp_set_callback(_utp_ctx, UTP_ON_STATE_CHANGE, &callback_on_state_change);
utp_set_callback(_utp_ctx, UTP_ON_READ, &callback_on_read);
utp_set_callback(_utp_ctx, UTP_ON_FIREWALL, &callback_on_firewall);
utp_set_callback(_utp_ctx, UTP_ON_ACCEPT, &callback_on_accept);
}
void context::register_socket(socket_impl& s) {
assert(!s._register_hook.is_linked());
bool was_empty = _registered_sockets.empty();
_registered_sockets.push_back(s);
if (was_empty) start();
}
void context::unregister_socket(socket_impl& s) {
assert(s._register_hook.is_linked());
s._register_hook.unlink();
if (_registered_sockets.empty()) stop();
}
void context::start_receiving()
{
if (_debug) {
log(this, " context start_receiving");
}
assert(_recv_handle.handler);
_ticker->start();
if (!_recv_handle.hook.is_linked())
_multiplexer->register_recv_handler(_recv_handle);
}
void context::start()
{
if (_debug) {
log(this, " context start");
}
}
void context::stop()
{
if (_debug) {
log(this, " context stop");
}
_ticker->stop();
}
void context::on_read( const sys::error_code& read_ec
, const endpoint_type& ep
, const uint8_t* data
, size_t size)
{
if (_debug) {
log(this, " context on_read data.size:", size
, " from:", ep);
}
sys::error_code ec;
if (!_multiplexer->available(ec)) {
utp_issue_deferred_acks(_utp_ctx);
}
if (read_ec) return;
sockaddr_storage src_addr = util::to_sockaddr(ep);
// XXX: This returns a boolean whether the data were handled or not.
// May be good to use it to decide whether to pass the data to other
// multiplexers.
utp_process_udp( _utp_ctx
, (unsigned char*) data
, size
, (sockaddr*) &src_addr
, util::sockaddr_size(src_addr));
if (!_multiplexer->available(ec)) {
utp_issue_deferred_acks(_utp_ctx);
}
if (_outstanding_op_count) start_receiving();
}
context::executor_type context::get_executor()
{
assert(_multiplexer && "TODO");
return _multiplexer->get_executor();
}
context::~context()
{
if (_debug) {
log(this, " ~context");
}
utp_destroy(_utp_ctx);
auto& s = asio::use_service<service>(_multiplexer->get_executor().context());
s.erase_context(_local_endpoint);
}
void context::increment_outstanding_ops(const char* dbg)
{
if (_debug) {
log(this, " context::increment_outstanding_ops "
, _outstanding_op_count, " -> ", (_outstanding_op_count + 1)
, " ", dbg, " (completed:", _completed_op_count, ")");
}
if (_outstanding_op_count++ == 0) {
start_receiving();
}
}
void context::decrement_outstanding_ops(const char* dbg)
{
if (_debug) {
log(this, " context::decrement_outstanding_ops "
, _outstanding_op_count, " -> ", (_outstanding_op_count - 1)
, " ", dbg, " (completed:", _completed_op_count, ")");
}
if (--_outstanding_op_count == 0 && _completed_op_count == 0) {
_ticker->stop();
}
}
void context::increment_completed_ops(const char* dbg)
{
if (_debug) {
log(this, " context::increment_completed_ops "
, _completed_op_count, " -> ", (_completed_op_count + 1)
, " ", dbg, " (outstanding:", _outstanding_op_count, ")");
}
_completed_op_count++;
}
void context::decrement_completed_ops(const char* dbg)
{
if (_debug) {
log(this, " context::decrement_completed_ops "
, _completed_op_count, " -> ", (_completed_op_count - 1)
, " ", dbg, " (outstanding:", _outstanding_op_count, ")");
}
if (--_completed_op_count == 0 && _outstanding_op_count == 0) {
_ticker->stop();
}
}
from analysis.jobAnalysis import calculate_histogram
import pickle
import MySQLdb
import numpy as np
class Result(object):
"""
    Abstract class for results of a workload analysis. Such a class has to be
    able to calculate some data over a data set. This class stores the results
    in a database; data can also be retrieved from a DB.
    It should expose data in a way that can be plotted.
"""
def __init__(self, table_name, keys=None):
""" Constructor
Args:
- table_name: string containing the database table name that
store data from this Result.
- keys: list of unique strings identifying each component of a result.
"""
if keys is None:
keys=[]
self._data = {}
self._table_name=table_name
self._keys= keys
def calculate(self, dataset):
"""
        Calculates statistics on a dataset and stores the results in self._data.
"""
pass
def store(self, db_obj, trace_id, measurement_type):
"""
Stores the content of self._data in the self._table_name table.
        Args:
        - db_obj: DBManager object to interact with the database.
        - trace_id: numeric id of the trace the result belongs to.
        - measurement_type: string identifying the kind of measurement stored.
        Returns: the primary key identifying the result entry in the
            database.
        Raises a SystemError exception if the insertion fails.
"""
keys = list(self._data.keys())
values = [self._encode(self._data[key], key) for key in keys]
keys = ["trace_id", "type"] + keys
values= [trace_id, measurement_type] + values
ok, insert_id = db_obj.insertValues(self._table_name, keys, values,
get_insert_id=True)
if not ok:
raise SystemError("Data insertion failed")
return insert_id
def load(self, db_obj, trace_id, measurement_type):
"""
        Loads self._data from the row identified by trace_id and
        measurement_type in the self._table_name table.
        Args:
        - db_obj: DBManager object that allows access to a database.
        - trace_id: numeric id of the trace whose result should be loaded.
        - measurement_type: string identifying the kind of measurement to load.
"""
keys = self._keys
data_dic=db_obj.getValuesDicList(self._table_name, keys, condition=
"trace_id={0} and type='{1}'".format(
trace_id, measurement_type))
if data_dic is not None and data_dic != ():
for key in keys:
self._set(key, self._decode(data_dic[0][key], key))
def get_data(self):
return self._data
def _set(self, data_name, data_value):
self._data[data_name] = data_value
def _get(self, data_name):
if data_name in list(self._data.keys()):
return self._data[data_name]
return None
def _encode(self, data_value, key):
"""
Encodes data_value to the format of a column of the table used by
this class. To be re-implemented in child classes as the table
        definition will change."""
return data_value
def _decode(self, blob, key):
"""
        Decodes blob from the format output by a database query. To be
re-implemented in child classes as the table implementation will
change."""
return blob
def create_table(self, db_obj):
"""
Creates the table associated with this Result class.
Args:
- db_obj: DBManager object allows access to a database.
"""
db_obj.doUpdate(self._create_query())
def _create_query(self):
"""Returns a string with the query needed to create a table
        corresponding to this Result class. To be modified according to the table
formats required by the child classes."""
return ""
def get_list_of_results(self, db_obj, trace_id):
"""Returns a list of the result types corresponding to this Result that
are for a trace identified by trace_id.
Args:
- db_obj: DBMaster connected object.
- trace_id: integer id of a trace
"""
lists = db_obj.getValuesAsColumns(
self._table_name, ["type"],
condition = "trace_id={0}".format(trace_id))
return lists["type"]
def plot(self, file_name):
"""Plots results on a filename"""
pass
class Histogram(Result):
"""
    Histogram Result class. It produces a histogram (bins and edges) over a
dataset.
"""
def __init__(self):
super(Histogram,self).__init__(table_name="histograms",
keys = ["bins", "edges"])
def calculate(self, data_set, bin_size, minmax=None, input_bins=None):
"""
Calculates the histogram according to the data_set.
Args:
- data_set: list of numbers to be analyzed.
- bin_size: float pointing to the size of the output bins.
- minmax: tuple (min, max) numbers to perform the histogram over.
        - input_bins: list of edges to be used in the histogram. If set, it
overrides bin_size.
"""
if bin_size is None and minmax is None:
            raise ValueError("Either bin_size or minmax has to be set")
bins, edges = calculate_histogram(data_set, th_min=0.0, th_acc=0.0,
range_values=minmax,
interval_size=bin_size,
bins=input_bins)
self._set("bins", bins)
self._set("edges", edges)
def get_data(self):
return self._get("bins"), self._get("edges")
def _create_query(self):
return """create table {0} (
id INT NOT NULL AUTO_INCREMENT,
trace_id INT(10) NOT NULL,
type VARCHAR(128) NOT NULL,
bins LONGBLOB,
edges LONGBLOB,
PRIMARY KEY(id, trace_id, type)
)""".format(self._table_name)
def _encode(self, data_value, key):
        """Database uses blobs to store the edges and bins."""
        import codecs
pickle_data = pickle.dumps(data_value)
return codecs.encode(pickle_data, "base64").decode()
def _decode(self, blob, key):
import codecs
pickle_data=codecs.decode(blob, "base64")
return pickle.loads(pickle_data)
class NumericList(Result):
def _create_query(self):
cad= """create table `{0}` (
id INT NOT NULL AUTO_INCREMENT,
trace_id INT(10) NOT NULL,
type VARCHAR(128) NOT NULL,
""".format(self._table_name)
for field in self._keys:
cad+=" {0} DOUBLE,".format(field)
cad+=""" PRIMARY KEY(id, trace_id, type))"""
return cad
def set_dic(self, the_dic):
for (key,value) in the_dic.items():
self._set(key, value)
def apply_factor(self, factor):
for key in self._keys:
self._set(key, float(self._get(key))*float(factor))
class NumericStats(Result):
"""
Does a basic analysis over a dataset including: minimum, maximum, mean,
standard deviation, dataset count, median and five percentiles (5, 25, 50
75,95).
    The object returned by get_data is a dictionary indexed by these keys: "min",
"max", "mean", "std", "count", "median", "p05", "p25", "p50", "p75", "p95".
"""
def __init__(self):
super(NumericStats,self).__init__(table_name="numericStats",
keys = ["min", "max", "mean", "std", "count", "median",
"p05", "p25", "p50", "p75", "p95" ])
def apply_factor(self, factor):
for key in ["min", "max", "mean", "std", "median",
"p05", "p25", "p50", "p75", "p95" ]:
self._set(key, float(self._get(key))*float(factor))
    def calculate(self, data_set):
        """Calculates a number of numeric statistical metrics over the numbers
        in the data_set list.
        """
        x = np.array(data_set, dtype=float)
self._set("min", min(x))
self._set("max", max(x))
self._set("mean", np.mean(x))
self._set("std", np.std(x))
self._set("count", x.shape[0])
        percentile_name=["p05", "p25", "p50", "p75", "p95"]
        percentile_values = np.percentile(x, [5, 25, 50, 75, 95])
        self._set("median", percentile_values[2])
        for (key, per) in zip(percentile_name, percentile_values):
self._set(key, per)
def _encode(self, data_value, key):
return data_value
def _decode(self, blob, key):
return float(blob)
def _create_query(self):
return """create table {0} (
id INT NOT NULL AUTO_INCREMENT,
trace_id INT(10) NOT NULL,
type VARCHAR(128) NOT NULL,
min DOUBLE,
max DOUBLE,
mean DOUBLE,
std DOUBLE,
count int,
median DOUBLE,
p05 DOUBLE,
p25 DOUBLE,
p50 DOUBLE,
p75 DOUBLE,
p95 DOUBLE,
PRIMARY KEY(id, trace_id, type)
)""".format(self._table_name)
def get_values_boxplot(self):
data_names = "median", "p25", "p75", "min", "max"
return [self._get(x) for x in data_names]
def calculate_results(data_list, field_list, bin_size_list,
                      minmax_list, store=False, db_obj=None, trace_id=None):
    """Calculates CDF and Stats results over the lists of values in data_list.
    Returns a dictionary indexed by "[field_name]_cdf" for Histogram objects and
    "[field_name]_stats" for NumericStats objects. A usage sketch follows this
    function.
    Args:
- data_list: lists of lists of values to be analyzed.
- field_list: list of strings with the name of the data set in the same
position as in data_list.
- bin_size_list: list of Bin sizes to be used for the CDF analysis of the
dataset in the same position at data_list.
- minmax_list: lists if tuples of numbers with the maximum and minimum
values to use in each CDF analysis.
- store: if True, the resulting Result objects will store their content
in a database.
- db_obj: DBManager object configured to access a database on which data
was stored.
- trace_id: Numeric ID of the trace originating the data in data_list.
"""
cdf_field_list = [x+"_cdf" for x in field_list]
stats_field_list = [x+"_stats" for x in field_list]
results_dic={}
for (data, cdf_field, stats_field, bin_size, minmax) in zip(data_list,
cdf_field_list, stats_field_list, bin_size_list, minmax_list):
if data:
cdf = Histogram()
cdf.calculate(data, bin_size=bin_size, minmax=minmax)
if store:
cdf.store(db_obj, trace_id, cdf_field)
results_dic[cdf_field]=cdf
stats = NumericStats()
if data:
stats.calculate(data)
if store:
stats.store(db_obj, trace_id, stats_field)
results_dic[stats_field]=stats
return results_dic
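# Usage sketch (illustrative only; this helper and the numbers in it are made up
# and are not part of the original module):
def _calculate_results_example():
    wait_times = [10.0, 25.0, 40.0, 55.0, 70.0, 85.0]
    results = calculate_results([wait_times], ["wait_time"],
                                bin_size_list=[15], minmax_list=[(0, 100)],
                                store=False)
    bins, edges = results["wait_time_cdf"].get_data()
    stats_values = results["wait_time_stats"].get_data()
    return bins, edges, stats_values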
def load_results(field_list, db_obj, trace_id):
    """Creates a number of Histogram and NumericStats objects, populates them
    from the database, and returns them in a dictionary indexed by
    "[field_name]_cdf" and "[field_name]_stats".
    Args:
    - field_list: types of data for which results are loaded. Corresponding
        results will be pulled from the Histogram and NumericStats tables.
- db_obj: DBManager object configured to access a database from which data
will be retrieved.
- trace_id: numeric id identifying the trace to which the data should be
loaded.
"""
results = {}
cdf_field_list = [x+"_cdf" for x in field_list]
stats_field_list = [x+"_stats" for x in field_list]
for (cdf_field, stats_field) in zip(cdf_field_list, stats_field_list):
cdf = Histogram()
cdf.load(db_obj, trace_id, cdf_field)
if cdf is not None:
results[cdf_field] = cdf
stats = NumericStats()
stats.load(db_obj, trace_id, stats_field)
if stats is not None:
results[stats_field] = stats
return results
# -*- coding: utf-8 -*-
# from functools import reduce
import numpy as np
import talib
from .context import ExecutionContext
from .utils import FormulaException, rolling_window, handle_numpy_warning
from .time_series import (
MarketDataSeries,
NumericSeries,
BoolSeries,
fit_series,
get_series,
get_bars,
ensure_timeseries,
)
from .helper import zig_helper
# ignore pandas warning
import warnings
__updated__ = "2021-06-24"
warnings.simplefilter(action='ignore', category=FutureWarning)
# drop the leading NaNs that some operators introduce at the start of a series
def filter_begin_nan(series):
i = 0
for x in series:
if np.isnan(x):
i += 1
else:
break
return series[i:]
class ArgumentSeriesBase(NumericSeries):
def getFunc(self):
"""EXAMPLE:
def getFunc(self):
return talib.MA
"""
raise NotImplementedError
class OneArgumentSeries(ArgumentSeriesBase):
def __init__(self, series, arg):
if isinstance(series, NumericSeries):
series = series.series
try:
if series.dtype != int:
series[np.isinf(series)] = np.nan
# print(f"series type:{type(series)}; self.func: {help(self.func)}")
# func = self.getFunc()
# series = func(series, arg)
series = self.getFunc()(series, arg)
# series = filter_begin_nan(series)
except Exception as e:
if series.dtype == int:
series = self.getFunc()(series.astype(float), arg)
else:
print(f"series error: {series}")
raise FormulaException(e)
super(ArgumentSeriesBase, self).__init__(series)
self.extra_create_kwargs["arg"] = arg
# def __init__(self, series, arg):
# if isinstance(series, NumericSeries):
# series = series.series
#
# try:
# series[np.isinf(series)] = np.nan
# print(f"series type:{type(series)}; self.func: {help(self.func)}")
# series = self.func(series, arg)
# except Exception as e:
# raise FormulaException(e)
# super(OneArgumentSeries, self).__init__(series)
# self.extra_create_kwargs["arg"] = arg
class MovingAverageSeries(OneArgumentSeries):
"""http://www.tadoc.org/indicator/MA.htm"""
def getFunc(self):
return talib.MA
class WeightedMovingAverageSeries(OneArgumentSeries):
"""http://www.tadoc.org/indicator/WMA.htm"""
def getFunc(self):
return talib.WMA
class ExponentialMovingAverageSeries(OneArgumentSeries):
"""http://www.fmlabs.com/reference/default.htm?url=ExpMA.htm"""
# func = talib.EMA
def getFunc(self):
return talib.EMA
class KAMASeries(OneArgumentSeries):
    """Kaufman's Adaptive Moving Average (KAMA).
    Traditional moving averages (simple, weighted, exponential) share an inherent
    weakness: they trail the trend and lag behind price. Short-period averages react
    quickly to price moves but are easily disturbed by price "noise", so the trend
    signals they give are often unreliable. To avoid both the false signals caused by
    short-term noise and the lag of long-term trends, Kaufman proposed an "adaptive"
    moving average, AMA/KAMA: it uses a fast moving average when the market moves
    quickly in one direction and a slow one when price saws back and forth sideways.
    ER (efficiency ratio) is the net price change divided by the total price movement
    and measures how directional recent trading has been. ER ranges from 0 to 1;
    values near 1 indicate a fast trend, values near 0 indicate mostly noise.
        ER = Change/Volatility
        Change = ABS(Close - Close (10 periods ago))
        Volatility = Sum10(ABS(Close - Prior Close))
    From ER and two exponential smoothing constants one derives
        SC (Smoothing Constant) = [ER x (fastest SC - slowest SC) + slowest SC]^2
    where fastest SC = 2/(2+1) and slowest SC = 2/(30+1). SC tracks trend speed and
    varies in proportion to ER (a rising ER means the trend is turning from slow to
    fast); the result is squared so that the average barely moves while the market
    trades sideways. Finally,
        Current KAMA = Prior KAMA + SC x (Price - Prior KAMA)
    so KAMA is adaptive, with its speed set by the smoothing coefficient SC.
    (A plain-NumPy reference sketch follows this class.)
    """
def getFunc(self):
return talib.KAMA
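# Illustrative sketch (hypothetical helper, not used by KAMASeries): the ER/SC
# recurrence described in the docstring above, written in plain NumPy for reference.
# talib.KAMA remains the implementation actually used.
def _kama_reference_example(close, n=10, fast=2, slow=30):
    close = np.asarray(close, dtype=float)
    kama = np.full_like(close, np.nan)
    if len(close) <= n:
        return kama
    fast_sc, slow_sc = 2.0 / (fast + 1), 2.0 / (slow + 1)
    kama[n] = close[n]
    for i in range(n + 1, len(close)):
        change = abs(close[i] - close[i - n])
        volatility = np.sum(np.abs(np.diff(close[i - n:i + 1])))
        er = change / volatility if volatility else 0.0
        sc = (er * (fast_sc - slow_sc) + slow_sc) ** 2
        kama[i] = kama[i - 1] + sc * (close[i] - kama[i - 1])
    return kama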
class StdSeries(OneArgumentSeries):
def getFunc(self):
return talib.STDDEV
def __init__(self, series, arg):
super().__init__(series, arg)
if arg > 1:
            # use the sample (Bessel-corrected) standard deviation
a = (arg / (arg - 1)) ** 0.5
self._series = self.series * a
class TwoArgumentSeries(ArgumentSeriesBase):
# class TwoArgumentSeries(NumericSeries):
def __init__(self, series, arg1, arg2=None):
if isinstance(series, NumericSeries):
series = series.series
try:
series[np.isinf(series)] = np.nan
series = self.getFunc()(series, arg1, arg2)
series = filter_begin_nan(series)
except Exception as e:
raise FormulaException(e)
super(TwoArgumentSeries, self).__init__(series)
self.extra_create_kwargs["arg1"] = arg1
self.extra_create_kwargs["arg2"] = arg2
class SMASeries(TwoArgumentSeries):
    """SMA as defined by 同花顺 (Tonghuashun): an exponentially weighted average with weight 1/n."""
def getFunc(self):
return self.func
def func(self, series, n, _):
results = np.nan_to_num(series).copy()
        # FIXME this is very slow; an equivalence sketch using pandas' ewm follows this class
        # https://stackoverflow.com/questions/42869495/numpy-version-of-exponential-weighted-moving-average-equivalent-to-pandas-ewm
for i in range(1, len(series)):
results[i] = ((n - 1) * results[i - 1] + results[i]) / n
return results
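# Illustrative check (hypothetical helper, not used by SMASeries): the recurrence in
# SMASeries.func is an exponentially weighted average with alpha = 1/n, so it matches
# pandas' ewm(alpha=1/n, adjust=False). pandas is assumed to be available.
def _sma_pandas_equivalence_example(series, n):
    import pandas as pd
    x = np.nan_to_num(np.asarray(series, dtype=float))
    return pd.Series(x).ewm(alpha=1.0 / n, adjust=False).mean().to_numpy()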
class CCISeries(TwoArgumentSeries):
def getFunc(self):
return talib.CCI
def __init__(self, high, low, close):
if isinstance(high, NumericSeries) and isinstance(low, NumericSeries) and isinstance(close, NumericSeries):
series1 = low.series
series2 = close.series
try:
series1[series1 == np.inf] = np.nan
series2[series2 == np.inf] = np.nan
except Exception as e:
raise FormulaException(e)
super(CCISeries, self).__init__(high, series1, series2)
# def __init__(self, high, low, close):
# if isinstance(high, NumericSeries) and isinstance(low, NumericSeries) and isinstance(close, NumericSeries):
# series0 = high.series
# series1 = low.series
# series2 = close.series
#
# try:
# series0[series0 == np.inf] = np.nan
# series1[series1 == np.inf] = np.nan
# series2[series2 == np.inf] = np.nan
# func = self.getFunc()
# # print(func, help(func))
# series = func(series0, series1, series2)
# # series = (self.getFunc())(series0, series1, series2)
# except Exception as e:
# raise FormulaException(e)
# super(CCISeries, self).__init__(series)
class SumSeries(NumericSeries):
    """Rolling sum over a period."""
def __init__(self, series, period):
if isinstance(series, NumericSeries):
series = series.series
try:
series[np.isinf(series)] = 0
series = talib.SUM(series, period)
except Exception as e:
raise FormulaException(e)
super(SumSeries, self).__init__(series)
self.extra_create_kwargs["period"] = period
class AbsSeries(NumericSeries):
def __init__(self, series):
if isinstance(series, NumericSeries):
series = series.series
try:
series[np.isinf(series)] = 0
# series[series == np.inf] = 0
# series[series == -np.inf] = 0
series = np.abs(series)
except Exception as e:
raise FormulaException(e)
super(AbsSeries, self).__init__(series)
@handle_numpy_warning
def CrossOver(s1, s2):
    """True where s1 crosses above s2 (a "golden cross").
    :param s1: first series
    :param s2: second series
    :returns: boolean series
    :rtype: BoolSeries
    """
s1, s2 = ensure_timeseries(s1), ensure_timeseries(s2)
series1, series2 = fit_series(s1.series, s2.series)
cond1 = series1 > series2
series1, series2 = fit_series(s1[1].series, s2[1].series)
cond2 = series1 <= series2 # s1[1].series <= s2[1].series
cond1, cond2 = fit_series(cond1, cond2)
s = cond1 & cond2
return BoolSeries(s)
def Ref(s1, n):
    """References data from a number of periods ago (with smoothing).
    Usage:
    REF(X,A) references the value of X from A periods ago. A may be a variable.
    Smoothing: when the referenced value is unavailable, the reference value of
    the previous period is used instead.
    Example:
    REF(CLOSE,BARSCOUNT(C)-1) gives the closing price of the second bar.
    """
if isinstance(n, NumericSeries):
return s1[int(n.value)]
return s1[n]
@handle_numpy_warning
def minimum(s1, s2):
s1, s2 = ensure_timeseries(s1), ensure_timeseries(s2)
if len(s1) == 0 or len(s2) == 0:
raise FormulaException("minimum size == 0")
series1, series2 = fit_series(s1.series, s2.series)
s = np.minimum(series1, series2)
return NumericSeries(s)
@handle_numpy_warning
def maximum(s1, s2):
s1, s2 = ensure_timeseries(s1), ensure_timeseries(s2)
if len(s1) == 0 or len(s2) == 0:
raise FormulaException("maximum size == 0")
series1, series2 = fit_series(s1.series, s2.series)
s = np.maximum(series1, series2)
return NumericSeries(s)
@handle_numpy_warning
def count(cond, n):
    """Counts the periods that satisfy a condition.
    Usage:
    COUNT(X,N) counts the periods among the last N that satisfy condition X;
    if N < 0 the count starts from the first valid value.
    Example:
    COUNT(CLOSE>OPEN,20) counts the bullish periods (close above open) within
    the last 20 periods.
    """
# TODO lazy compute
series = cond.series
size = len(cond.series) - n
try:
# result = np.full(size, 0, dtype=np.int)
result = np.full(size, 0, dtype=int)
except ValueError as e:
raise FormulaException(e)
for i in range(size - 1, 0, -1):
s = series[-n:]
result[i] = len(s[s == True])
series = series[:-1]
return NumericSeries(result)
@handle_numpy_warning
def every(cond, n):
return count(cond, n) == n
@handle_numpy_warning
def hhv(s, n):
# TODO lazy compute
series = s.series
# size = len(s.series) - n
# try:
# # result = np.full(size, 0, dtype=np.float64)
# # result = np.full(size, 0, dtype=float)
# pass
# except ValueError as e:
# raise FormulaException(e)
if 0 < n < len(series):
result = np.max(rolling_window(series, n), 1)
result = np.append(np.array([np.nan] * (n - 1)), result)
else:
result = np.array([np.max(series)])
if n > 0:
result = np.append(np.array([np.nan] * (len(series) - 1)), result)
return NumericSeries(result)
@handle_numpy_warning
def llv(s, n):
    """LLV(X,N): the lowest value of X within the last N periods; N=0 starts
    from the first valid value.
    Examples: LLV(LOW,N) is the lowest price within N periods;
    LLV(LOW,0) is the all-time lowest price.
    """
# TODO lazy compute
series = s.series
# size = len(s.series) - n
# try:
# pass
# # result = np.full(size, 0, dtype=np.float64)
# # result = np.full(size, 0, dtype=float)
# except ValueError as e:
# raise FormulaException(e)
if 0 < n < len(series):
result = np.min(rolling_window(series, n), 1)
result = np.append(np.array([np.nan] * (n - 1)), result)
else:
result = np.array([np.min(series)])
if n > 0:
result = np.append(np.array([np.nan] * (len(series) - 1)), result)
return NumericSeries(result)
@handle_numpy_warning
def hhvbars(s, n):
    """HHVBARS: position of the last high, i.e. the number of periods from the
    last high to the current period.
    Usage: HHVBARS(X,N) returns the number of periods from the highest value of
    X within N periods to the current period; N=0 starts from the first valid value.
    Example: HHVBARS(HIGH,0) gives the number of periods since the all-time high.
    """
# TODO lazy compute
series = s.series
size = len(s.series) - n
try:
# result = np.full(size, 0, dtype=np.float64)
result = np.full(size, 0, dtype=float)
except ValueError as e:
raise FormulaException(e)
result = np.argmax(rolling_window(series, n), 1)
return NumericSeries(result)
@handle_numpy_warning
def llvbars(s, n):
    """LLVBARS: position of the last low, i.e. the number of periods from the
    last low to the current period.
    Usage: LLVBARS(X,N) returns the number of periods from the lowest value of
    X within N periods to the current period; N=0 starts from the first valid value.
    Example: LLVBARS(LOW,20) gives the number of periods since the 20-period low.
    """
# TODO lazy compute
series = s.series
size = len(s.series) - n
try:
# result = np.full(size, 0, dtype=np.float64)
result = np.full(size, 0, dtype=float)
except ValueError as e:
raise FormulaException(e)
result = np.argmin(rolling_window(series, n), 1)
return NumericSeries(result)
@handle_numpy_warning
def iif(condition, true_statement, false_statement):
    """IF: conditional selection; returns different values depending on a condition.
    Usage: IF(X,A,B) returns A if X is non-zero, otherwise B.
    Example: IF(CLOSE>OPEN,HIGH,LOW) returns the high when the period closes up,
    otherwise the low.
    IFF behaves the same way: IFF(X,A,B) returns A if X is non-zero, otherwise B.
    """
n = len(condition)
series1 = get_series(true_statement, n)
series2 = get_series(false_statement, n)
cond_series, series1, series2 = fit_series(
condition.series, series1, series2)
series = series2.copy()
series[cond_series] = series1[cond_series]
return NumericSeries(series)
@handle_numpy_warning
def ceiling(s):
    """CEILING: round up. Usage: CEILING(A) returns the nearest integer in the
    increasing direction of A.
    Examples: CEILING(12.3) gives 13, CEILING(-3.5) gives -3.
    FLOOR: round down. Usage: FLOOR(A) returns the nearest integer in the
    decreasing direction of A.
    """
series = s.series
return NumericSeries(np.ceil(series))
@handle_numpy_warning
def const(s):
if isinstance(s, NumericSeries):
return NumericSeries(s.series)
elif isinstance(s, np.ndarray):
return NumericSeries(s)
else:
return NumericSeries(np.array([s]))
@handle_numpy_warning
def drawnull(s):
    """DRAWNULL: returns the "invalid number" placeholder.
    Usage: DRAWNULL
    Example: IF(CLOSE>REF(CLOSE,1),CLOSE,DRAWNULL) draws no line on the chart
    when price falls. (BACKSET: backward assignment.)
    """
pass
@handle_numpy_warning
def zig(s, n):
    """ZIG: zig-zag turning points. Usage: ZIG(K,N) turns when the price change
    exceeds N%; K selects the price series: 0 open, 1 high, 2 low, 3 close,
    anything else an arbitrary array.
    Example: ZIG(3,5) is the zig-zag of the close with a 5% reversal threshold.
    """
series = s.series
assert isinstance(series, np.ndarray)
z, _ = zig_helper(series, n)
return NumericSeries(z)
@handle_numpy_warning
def troughbars(s, n, m):
    """TROUGHBARS: trough position, i.e. the distance from a previous zig-zag
    trough to the current period.
    Usage: TROUGHBARS(K,N,M) is the number of periods from the M-th previous
    trough of ZIG(K,N) to the current period; M must be at least 1.
    Example: TROUGHBARS(2,5,2) is the number of periods from the second previous
    trough of the 5% low-price zig-zag to the current period.
    """
series = s.series
assert isinstance(series, np.ndarray)
z, peers = zig_helper(series, n)
z_in_p = [z[i] for i in peers]
count = 0
for i in range(len(z_in_p) - 1, 1, -1):
if count == m:
return i
if z_in_p[i] < z_in_p[i - 1]:
count += 1
return 0
@handle_numpy_warning
def barslast(statement):
    """BARSLAST: position where the condition last held, i.e. the number of
    periods since the condition was last true.
    Usage: BARSLAST(X) is the number of periods since X was last non-zero.
    Example: BARSLAST(CLOSE/REF(CLOSE,1)>=1.
    """
series = get_series(statement)
size = len(series)
end = size
begin = size - 1
try:
result = np.full(size, 1e16, dtype=int)
except ValueError as e:
raise FormulaException(e)
for s in series[::-1]:
if s:
result[begin:end] = range(0, end - begin)
end = begin
begin -= 1
return NumericSeries(result)
@handle_numpy_warning
def mular(series, n):
    """Cumulative product.
    Usage:
    MULAR(X,N) is the product of X over the last N periods; N=0 starts from the
    first valid value.
    Example:
    MULAR(C/REF(C,1),0) gives the compound return since the first trading day.
    """
    raise Exception("not implemented!!!")
# @handle_numpy_warning
def upnday(s, m: int):
    """Whether the series has risen for m consecutive periods.
    Usage:
    UPNDAY(CLOSE,M)
    is true when the close has risen for M consecutive periods; M is a constant.
    """
# s = get_series(s)
return every(s > Ref(s, 1), m)
@handle_numpy_warning
def downnday(s, m: int):
    """Whether the series has fallen for m consecutive periods.
    Usage:
    DOWNNDAY(CLOSE,M)
    is true when the close has fallen for M consecutive periods; M is a constant.
    """
return every(s < Ref(s, 1), m)
def nday(s1, s2, m: int):
    """Whether X > Y has held continuously for m periods.
    Usage:
    NDAY(CLOSE,OPEN,3)
    is true when there have been 3 consecutive bullish candles (close above open).
    """
return every(s1 > s2, m)
def codelike(s: str):
    """Whether the instrument code starts with the given prefix.
    Usage:
    if(CODELIKE('600'),x,y);
    """
# todo
raise Exception("not implemented")
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy
import numpy as np
from pyscf.pbc import gto as pgto
import pyscf.pbc.dft as pdft
from pyscf.pbc.df import fft, aft, mdf
##################################################
#
# port from ao2mo/eris.py
#
##################################################
from pyscf import lib
from pyscf.pbc import lib as pbclib
from pyscf.pbc.dft.gen_grid import gen_uniform_grids
from pyscf.pbc.dft.numint import eval_ao
from pyscf.pbc import tools
einsum = np.einsum
r"""
(ij|kl) = \int dr1 dr2 i*(r1) j(r1) v(r12) k*(r2) l(r2)
= (ij|G) v(G) (G|kl)
i*(r) j(r) = 1/N \sum_G e^{iGr} (G|ij)
= 1/N \sum_G e^{-iGr} (ij|G)
"forward" FFT:
(G|ij) = \sum_r e^{-iGr} i*(r) j(r) = fft[ i*(r) j(r) ]
"inverse" FFT:
(ij|G) = \sum_r e^{iGr} i*(r) j(r) = N * ifft[ i*(r) j(r) ]
= conj[ \sum_r e^{-iGr} j*(r) i(r) ]
"""
def general(cell, mo_coeffs, kpts=None, compact=0):
'''pyscf-style wrapper to get MO 2-el integrals.'''
assert len(mo_coeffs) == 4
if kpts is not None:
assert len(kpts) == 4
return get_mo_eri(cell, mo_coeffs, kpts)
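# Illustrative sketch (not part of the original module): a plain-NumPy check of the
# forward/inverse FFT convention spelled out in the string above. The helper name and
# array size are made up.
def _fft_convention_example(n=8):
    rng = np.random.RandomState(0)
    rho = rng.random_sample(n) + 1j*rng.random_sample(n)  # plays the role of i*(r) j(r)
    forward = np.fft.fft(rho)        # (G|ij) = sum_r e^{-iGr} rho(r)
    inverse = n * np.fft.ifft(rho)   # (ij|G) = sum_r e^{+iGr} rho(r) = N * ifft[rho]
    # (ij|G) also equals conj[ sum_r e^{-iGr} conj(rho(r)) ]
    assert np.allclose(inverse, np.conj(np.fft.fft(np.conj(rho))))
    return forward, inverse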
def get_mo_eri(cell, mo_coeffs, kpts=None):
'''Convenience function to return MO 2-el integrals.'''
mo_coeff12 = mo_coeffs[:2]
mo_coeff34 = mo_coeffs[2:]
if kpts is None:
kpts12 = kpts34 = q = None
else:
kpts12 = kpts[:2]
kpts34 = kpts[2:]
q = kpts12[0] - kpts12[1]
#q = kpts34[1] - kpts34[0]
if q is None:
q = np.zeros(3)
mo_pairs12_kG = get_mo_pairs_G(cell, mo_coeff12, kpts12)
mo_pairs34_invkG = get_mo_pairs_invG(cell, mo_coeff34, kpts34, q)
return assemble_eri(cell, mo_pairs12_kG, mo_pairs34_invkG, q)
def get_mo_pairs_G(cell, mo_coeffs, kpts=None, q=None):
'''Calculate forward (G|ij) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_G : (ngrids, nmoi*nmoj) ndarray
The FFT of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_G = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(-1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R_ij, cell.mesh, fac)
return mo_pairs_G
def get_mo_pairs_invG(cell, mo_coeffs, kpts=None, q=None):
'''Calculate "inverse" (ij|G) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
The inverse FFTs of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
#mo_pairs_R = einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_invG = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_R_ij = np.conj(moiR[:,i])*mojR[:,j]
mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R_ij), cell.mesh, fac))
return mo_pairs_invG
def get_mo_pairs_G_old(cell, mo_coeffs, kpts=None, q=None):
'''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all MO pairs.
TODO: - Implement simplifications for real orbitals.
Args:
mo_coeff: length-2 list of (nao,nmo) ndarrays
The two sets of MO coefficients to use in calculating the
product |ij).
Returns:
mo_pairs_G, mo_pairs_invG : (ngrids, nmoi*nmoj) ndarray
The FFTs of the real-space MO pairs.
'''
coords = gen_uniform_grids(cell)
if kpts is None:
q = np.zeros(3)
aoR = eval_ao(cell, coords)
ngrids = aoR.shape[0]
if np.array_equal(mo_coeffs[0], mo_coeffs[1]):
nmoi = nmoj = mo_coeffs[0].shape[1]
moiR = mojR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
else:
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR, mo_coeffs[1])
else:
if q is None:
q = kpts[1]-kpts[0]
aoR_ki = eval_ao(cell, coords, kpt=kpts[0])
aoR_kj = eval_ao(cell, coords, kpt=kpts[1])
ngrids = aoR_ki.shape[0]
nmoi = mo_coeffs[0].shape[1]
nmoj = mo_coeffs[1].shape[1]
moiR = einsum('ri,ia->ra', aoR_ki, mo_coeffs[0])
mojR = einsum('ri,ia->ra', aoR_kj, mo_coeffs[1])
mo_pairs_R = np.einsum('ri,rj->rij', np.conj(moiR), mojR)
mo_pairs_G = np.zeros([ngrids,nmoi*nmoj], np.complex128)
mo_pairs_invG = np.zeros([ngrids,nmoi*nmoj], np.complex128)
fac = np.exp(-1j*np.dot(coords, q))
for i in range(nmoi):
for j in range(nmoj):
mo_pairs_G[:,i*nmoj+j] = tools.fftk(mo_pairs_R[:,i,j], cell.mesh, fac)
mo_pairs_invG[:,i*nmoj+j] = np.conj(tools.fftk(np.conj(mo_pairs_R[:,i,j]), cell.mesh,
fac.conj()))
return mo_pairs_G, mo_pairs_invG
def assemble_eri(cell, orb_pair_invG1, orb_pair_G2, q=None):
'''Assemble 4-index electron repulsion integrals.
Returns:
(nmo1*nmo2, nmo3*nmo4) ndarray
'''
if q is None:
q = np.zeros(3)
coulqG = tools.get_coulG(cell, -1.0*q)
ngrids = orb_pair_invG1.shape[0]
Jorb_pair_G2 = np.einsum('g,gn->gn',coulqG,orb_pair_G2)*(cell.vol/ngrids**2)
eri = np.dot(orb_pair_invG1.T, Jorb_pair_G2)
return eri
def get_ao_pairs_G(cell, kpt=np.zeros(3)):
'''Calculate forward (G|ij) and "inverse" (ij|G) FFT of all AO pairs.
Args:
cell : instance of :class:`Cell`
Returns:
ao_pairs_G, ao_pairs_invG : (ngrids, nao*(nao+1)/2) ndarray
The FFTs of the real-space AO pairs.
'''
coords = gen_uniform_grids(cell)
aoR = eval_ao(cell, coords, kpt) # shape = (coords, nao)
ngrids, nao = aoR.shape
gamma_point = abs(kpt).sum() < 1e-9
if gamma_point:
npair = nao*(nao+1)//2
ao_pairs_G = np.empty([ngrids, npair], np.complex128)
ij = 0
for i in range(nao):
for j in range(i+1):
ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
ao_pairs_G[:,ij] = tools.fft(ao_ij_R, cell.mesh)
#ao_pairs_invG[:,ij] = ngrids*tools.ifft(ao_ij_R, cell.mesh)
ij += 1
ao_pairs_invG = ao_pairs_G.conj()
else:
ao_pairs_G = np.zeros([ngrids, nao,nao], np.complex128)
for i in range(nao):
for j in range(nao):
ao_ij_R = np.conj(aoR[:,i]) * aoR[:,j]
ao_pairs_G[:,i,j] = tools.fft(ao_ij_R, cell.mesh)
ao_pairs_invG = ao_pairs_G.transpose(0,2,1).conj().reshape(-1,nao**2)
ao_pairs_G = ao_pairs_G.reshape(-1,nao**2)
return ao_pairs_G, ao_pairs_invG
def get_ao_eri(cell, kpt=np.zeros(3)):
'''Convenience function to return AO 2-el integrals.'''
ao_pairs_G, ao_pairs_invG = get_ao_pairs_G(cell, kpt)
eri = assemble_eri(cell, ao_pairs_invG, ao_pairs_G)
if abs(kpt).sum() < 1e-9:
eri = eri.real
return eri
##################################################
#
# ao2mo/eris.py end
#
##################################################
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; C .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))],
'C' :'gth-szv',}
cell.pseudo = {'C':'gth-pade'}
cell.a = np.eye(3) * 2.5
cell.mesh = [21] * 3
cell.build()
np.random.seed(1)
kpts = np.random.random((4,3))
kpts[3] = kpts[0]-kpts[1]+kpts[2]
kpt0 = np.zeros(3)
cell1 = pgto.Cell()
cell1.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell1.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
cell1.a = np.eye(3) * 2.5
cell1.mesh = [21] * 3
cell1.build()
kdf0 = mdf.MDF(cell1)
kdf0.auxbasis = 'weigend'
kdf0.mesh = [21] * 3
kdf0.kpts = kpts
def finger(a):
w = np.cos(np.arange(a.size))
return np.dot(w, a.ravel())
class KnownValues(unittest.TestCase):
def test_get_pp_loc_part1_high_cost(self):
df = aft.AFTDF(cell)
v1 = aft.get_pp_loc_part1(df, kpts[0])
self.assertAlmostEqual(finger(v1), (-6.0893491060887159+0.19823828749533859j), 8)
def test_aft_get_nuc(self):
df = aft.AFTDF(cell)
v1 = df.get_nuc(kpts[0])
self.assertAlmostEqual(finger(v1), (-5.764786312608102+0.19126292955145852j), 8)
def test_aft_get_pp(self):
v0 = pgto.pseudo.get_pp(cell, kpts[0])
v1 = aft.AFTDF(cell).get_pp(kpts)
self.assertTrue(np.allclose(v0, v1[0], atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(v1[0]), (-5.6240305085898807+0.22094834207603817j), 8)
v0 = pgto.pseudo.get_pp(cell, kpts[1])
self.assertTrue(np.allclose(v0, v1[1], atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(v1[1]), (-5.53877585793+1.043933371359j) ,8)
self.assertAlmostEqual(finger(v1[2]), (-6.05309558678+0.281728966073j), 8)
self.assertAlmostEqual(finger(v1[3]), (-5.60115995450+0.275973062529j), 8)
def test_aft_get_ao_eri(self):
df0 = fft.FFTDF(cell1)
df = aft.AFTDF(cell1)
eri0 = df0.get_ao_eri(compact=True)
eri1 = df.get_ao_eri(compact=True)
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
eri0 = df0.get_ao_eri(kpts[0])
eri1 = df.get_ao_eri(kpts[0])
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
eri0 = df0.get_ao_eri(kpts)
eri1 = df.get_ao_eri(kpts)
self.assertAlmostEqual(abs(eri0-eri1).max(), 0, 9)
def test_aft_get_ao_eri_high_cost(self):
df0 = fft.FFTDF(cell)
df = aft.AFTDF(cell)
eri0 = df0.get_ao_eri(compact=True)
eri1 = df.get_ao_eri(compact=True)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), 0.80425361966560172, 8)
eri0 = df0.get_ao_eri(kpts[0])
eri1 = df.get_ao_eri(kpts[0])
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), (2.9346374476387949-0.20479054936779137j), 8)
eri0 = df0.get_ao_eri(kpts)
eri1 = df.get_ao_eri(kpts)
self.assertTrue(np.allclose(eri0, eri1, atol=1e-5, rtol=1e-5))
self.assertAlmostEqual(finger(eri1), (0.33709287302019619-0.94185725020966538j), 8)
def test_get_eri_gamma(self):
odf0 = mdf.MDF(cell1)
odf = aft.AFTDF(cell1)
ref = odf0.get_eri()
eri0000 = odf.get_eri(compact=True)
self.assertTrue(eri0000.dtype == numpy.double)
self.assertTrue(np.allclose(eri0000, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri0000), 0.23714016293926865, 9)
    def test_get_eri_1111(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
eri1111 = odf.get_eri((kpts[0],kpts[0],kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
eri1111 = odf.get_eri((kpts[0]+1e-8,kpts[0]+1e-8,kpts[0],kpts[0]))
self.assertTrue(np.allclose(eri1111, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri1111), (1.2410388899583582-5.2370501878355006e-06j), 9)
def test_get_eri_0011(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-3, rtol=1e-3))
self.assertAlmostEqual(finger(eri0011), (1.2410162858084512+0.00074485383749912936j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[0],kpts[1],kpts[1]))
eri0011 = odf.get_eri((kpts[0],kpts[0],kpts[1],kpts[1]))
self.assertTrue(np.allclose(eri0011, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0011), (1.2410162860852818+0.00074485383748954838j), 9)
def test_get_eri_0110(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-6, rtol=1e-6))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, (kpts[0],kpts[1],kpts[1],kpts[0]))
eri0110 = odf.get_eri((kpts[0],kpts[1],kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
eri0110 = odf.get_eri((kpts[0]+1e-8,kpts[1]+1e-8,kpts[1],kpts[0]))
self.assertTrue(np.allclose(eri0110, ref, atol=1e-9, rtol=1e-9))
self.assertAlmostEqual(finger(eri0110), (1.2928399254827956-0.011820590601969154j), 9)
def test_get_eri_0123(self):
odf = aft.AFTDF(cell1)
ref = kdf0.get_eri(kpts)
eri1111 = odf.get_eri(kpts)
self.assertAlmostEqual(abs(eri1111-ref).max(), 0, 9)
self.assertAlmostEqual(finger(eri1111), (1.2917759427391706-0.013340252488069412j), 9)
ref = fft.FFTDF(cell1).get_mo_eri([numpy.eye(cell1.nao_nr())]*4, kpts)
self.assertAlmostEqual(abs(eri1111-ref).max(), 0, 9)
def test_get_mo_eri(self):
df0 = fft.FFTDF(cell1)
odf = aft.AFTDF(cell1)
nao = cell1.nao_nr()
numpy.random.seed(5)
mo =(numpy.random.random((nao,nao)) +
numpy.random.random((nao,nao))*1j)
eri_mo0 = df0.get_mo_eri((mo,)*4, kpts)
eri_mo1 = odf.get_mo_eri((mo,)*4, kpts)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
kpts_t = (kpts[2],kpts[3],kpts[0],kpts[1])
eri_mo2 = df0.get_mo_eri((mo,)*4, kpts_t)
eri_mo2 = eri_mo2.reshape((nao,)*4).transpose(2,3,0,1).reshape(nao**2,-1)
self.assertTrue(np.allclose(eri_mo2, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpt0,kpt0,kpts[0],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
eri_mo1 = odf.get_mo_eri((mo,)*4, (kpts[0],kpts[0],kpt0,kpt0,))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
mo1 = mo[:,:nao//2+1]
eri_mo0 = df0.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
eri_mo1 = odf.get_mo_eri((mo1,mo,mo,mo1), (kpts[0],)*4)
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
eri_mo0 = df0.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
eri_mo1 = odf.get_mo_eri((mo1,mo,mo1,mo), (kpts[0],kpts[1],kpts[1],kpts[0],))
self.assertTrue(np.allclose(eri_mo1, eri_mo0, atol=1e-7, rtol=1e-7))
def test_init_aft_1d(self):
cell = pgto.Cell()
cell.atom = 'He 1. .5 .5; He .1 1.3 2.1'
cell.basis = {'He': [(0, (2.5, 1)), (0, (1., 1))]}
cell.a = np.eye(3) * 2.5
cell.dimension = 1
cell.mesh = [3, 3, 3]
cell.build()
f = aft.AFTDF(cell)
np.random.seed(1)
f.kpts = np.random.random((4,3))
f.check_sanity()
if __name__ == '__main__':
print("Full Tests for aft")
unittest.main()
[STATEMENT]
lemma T_A'_eq_lem: "(\<Sum>i=0..<length qs. t_A' i) =
T (s_A' 0) (drop 0 qs) (drop 0 acts)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum t_A' {0..<length qs} = int (T (s_A' 0) (drop 0 qs) (drop 0 acts))
[PROOF STEP]
proof(induction rule: zero_induct[of _ "size qs"])
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. sum t_A' {length qs..<length qs} = int (T (s_A' (length qs)) (drop (length qs) qs) (drop (length qs) acts))
2. \<And>n. sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts)) \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
case 1
[PROOF STATE]
proof (state)
this:
goal (2 subgoals):
1. sum t_A' {length qs..<length qs} = int (T (s_A' (length qs)) (drop (length qs) qs) (drop (length qs) acts))
2. \<And>n. sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts)) \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum t_A' {length qs..<length qs} = int (T (s_A' (length qs)) (drop (length qs) qs) (drop (length qs) acts))
[PROOF STEP]
by (simp add: len_acts)
[PROOF STATE]
proof (state)
this:
sum t_A' {length qs..<length qs} = int (T (s_A' (length qs)) (drop (length qs) qs) (drop (length qs) acts))
goal (1 subgoal):
1. \<And>n. sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts)) \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<And>n. sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts)) \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
case (2 n)
[PROOF STATE]
proof (state)
this:
sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts))
goal (1 subgoal):
1. \<And>n. sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts)) \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
show ?case
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
proof cases
[PROOF STATE]
proof (state)
goal (2 subgoals):
1. ?P \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
2. \<not> ?P \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
assume "n < length qs"
[PROOF STATE]
proof (state)
this:
n < length qs
goal (2 subgoals):
1. ?P \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
2. \<not> ?P \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
n < length qs
goal (1 subgoal):
1. sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
using 2
[PROOF STATE]
proof (prove)
using this:
n < length qs
sum t_A' {Suc n..<length qs} = int (T (s_A' (Suc n)) (drop (Suc n) qs) (drop (Suc n) acts))
goal (1 subgoal):
1. sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
by(simp add: Cons_nth_drop_Suc[symmetric,where i=n] len_acts sum.atLeast_Suc_lessThan
t_A'_t free_A_def paid_A'_def)
[PROOF STATE]
proof (state)
this:
sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
goal (1 subgoal):
1. \<not> n < length qs \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
next
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. \<not> n < length qs \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
assume "\<not> n < length qs"
[PROOF STATE]
proof (state)
this:
\<not> n < length qs
goal (1 subgoal):
1. \<not> n < length qs \<Longrightarrow> sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
thus ?case
[PROOF STATE]
proof (prove)
using this:
\<not> n < length qs
goal (1 subgoal):
1. sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
[PROOF STEP]
by (simp add: len_acts)
[PROOF STATE]
proof (state)
this:
sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
sum t_A' {n..<length qs} = int (T (s_A' n) (drop n qs) (drop n acts))
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 2225, "file": "List_Update_BIT", "length": 18} |
import numpy as np
import matplotlib.pyplot as plt
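# Quick smoke test: plot sin(x) sampled on [-3, 3) in steps of 0.1.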
x = np.arange(-3, 3, 0.1)
y = np.sin(x)
plt.plot(x, y)
plt.show()
# Copyright 2017 Abien Fred Agarap
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =========================================================================
"""Implementation of the Multilayer Perceptron using TensorFlow"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
__version__ = '0.1.0'
__author__ = 'Abien Fred Agarap'
import numpy as np
import os
import sys
import time
import tensorflow as tf
class MLP:
"""Implementation of the Multilayer Perceptron using TensorFlow"""
def __init__(self, alpha, batch_size, node_size, num_classes, num_features):
"""Initialize the MLP model
Parameter
---------
alpha : float
The learning rate to be used by the neural network.
batch_size : int
The number of batches to use for training/validation/testing.
node_size : int
The number of neurons in the neural network.
num_classes : int
The number of classes in a dataset.
num_features : int
The number of features in a dataset.
"""
self.alpha = alpha
self.batch_size = batch_size
self.node_size = node_size
self.num_classes = num_classes
self.num_features = num_features
def __graph__():
"""Build the inference graph"""
with tf.name_scope('input'):
# [BATCH_SIZE, NUM_FEATURES]
x_input = tf.placeholder(dtype=tf.float32, shape=[None, self.num_features], name='x_input')
# [BATCH_SIZE]
y_input = tf.placeholder(dtype=tf.uint8, shape=[None], name='y_input')
# [BATCH_SIZE, NUM_CLASSES]
y_onehot = tf.one_hot(indices=y_input, depth=self.num_classes, on_value=1, off_value=0, name='y_onehot')
learning_rate = tf.placeholder(dtype=tf.float32, name='learning_rate')
first_hidden_layer = {'weights': self.weight_variable('h1_w_layer', [self.num_features, self.node_size[0]]),
'biases': self.bias_variable('h1_b_layer', [self.node_size[0]])}
second_hidden_layer = {'weights': self.weight_variable('h2_w_layer', [self.node_size[0],
self.node_size[1]]),
'biases': self.bias_variable('h2_b_layer', [self.node_size[1]])}
third_hidden_layer = {'weights': self.weight_variable('h3_w_layer', [self.node_size[1], self.node_size[2]]),
'biases': self.bias_variable('h3_b_layer', [self.node_size[2]])}
output_layer = {'weights': self.weight_variable('output_w_layer', [self.node_size[2], self.num_classes]),
'biases': self.bias_variable('output_b_layer', [self.num_classes])}
first_layer = tf.matmul(x_input, first_hidden_layer['weights']) + first_hidden_layer['biases']
first_layer = tf.nn.relu(first_layer)
second_layer = tf.matmul(first_layer, second_hidden_layer['weights']) + second_hidden_layer['biases']
second_layer = tf.nn.relu(second_layer)
third_layer = tf.matmul(second_layer, third_hidden_layer['weights']) + third_hidden_layer['biases']
third_layer = tf.nn.relu(third_layer)
output_layer = tf.matmul(third_layer, output_layer['weights']) + output_layer['biases']
tf.summary.histogram('pre-activations', output_layer)
with tf.name_scope('loss'):
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=output_layer, labels=y_onehot))
tf.summary.scalar('loss', loss)
optimizer_op = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(loss)
with tf.name_scope('accuracy'):
predicted_class = tf.nn.softmax(output_layer)
with tf.name_scope('correct_prediction'):
correct_prediction = tf.equal(tf.argmax(predicted_class, 1), tf.argmax(y_onehot, 1))
with tf.name_scope('accuracy'):
accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
self.x_input = x_input
self.y_input = y_input
self.y_onehot = y_onehot
self.learning_rate = learning_rate
self.loss = loss
self.optimizer_op = optimizer_op
self.predicted_class = predicted_class
self.accuracy = accuracy
self.merged = merged
sys.stdout.write('\n<log> Building Graph...')
__graph__()
sys.stdout.write('</log>\n')
def train(self, num_epochs, log_path, train_data, train_size, test_data, test_size, result_path):
"""Trains the MLP model
Parameter
---------
num_epochs : int
The number of passes over the entire dataset.
log_path : str
The path where to save the TensorBoard logs.
train_data : numpy.ndarray
The NumPy array to be used as training dataset.
train_size : int
The size of the `train_data`.
test_data : numpy.ndarray
The NumPy array to be used as testing dataset.
test_size : int
            The size of the `test_data`.
        result_path : str
            The path where to save the concatenated actual and predicted labels.
        """
# initialize the variables
init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
timestamp = str(time.asctime())
train_writer = tf.summary.FileWriter(log_path + timestamp + '-training', graph=tf.get_default_graph())
test_writer = tf.summary.FileWriter(log_path + timestamp + '-test', graph=tf.get_default_graph())
with tf.Session() as sess:
sess.run(init_op)
try:
for step in range(num_epochs * train_size // self.batch_size):
offset = (step * self.batch_size) % train_size
train_data_batch = train_data[0][offset:(offset + self.batch_size)]
train_label_batch = train_data[1][offset:(offset + self.batch_size)]
feed_dict = {self.x_input: train_data_batch, self.y_input: train_label_batch,
self.learning_rate: self.alpha}
train_summary, _, step_loss = sess.run([self.merged, self.optimizer_op, self.loss],
feed_dict=feed_dict)
if step % 100 == 0 and step > 0:
train_accuracy = sess.run(self.accuracy, feed_dict=feed_dict)
print('step [{}] train -- loss : {}, accuracy : {}'.format(step, step_loss, train_accuracy))
train_writer.add_summary(train_summary, global_step=step)
except KeyboardInterrupt:
print('KeyboardInterrupt at step {}'.format(step))
os._exit(1)
finally:
print('EOF -- Training done at step {}'.format(step))
for step in range(num_epochs * test_size // self.batch_size):
offset = (step * self.batch_size) % test_size
test_data_batch = test_data[0][offset:(offset + self.batch_size)]
test_label_batch = test_data[1][offset:(offset + self.batch_size)]
feed_dict = {self.x_input: test_data_batch, self.y_input: test_label_batch}
test_summary, test_accuracy, test_loss, predictions, actual =\
sess.run([self.merged, self.accuracy, self.loss, self.predicted_class, self.y_onehot],
feed_dict=feed_dict)
if step % 100 == 0 and step > 0:
print('step [{}] test -- loss : {}, accuracy : {}'.format(step, test_loss, test_accuracy))
test_writer.add_summary(test_summary, step)
self.save_labels(predictions=predictions, actual=actual, result_path=result_path, phase='testing',
step=step)
print('EOF -- Testing done at step {}'.format(step))
@staticmethod
def weight_variable(name, shape):
"""Initialize weight variable
Parameter
---------
        name : str
            The name of the variable to create.
        shape : list
The shape of the initialized value.
Returns
-------
The created `tf.get_variable` for weights.
"""
initial_value = tf.random_normal(shape=shape, stddev=0.01)
return tf.get_variable(name=name, initializer=initial_value)
@staticmethod
def bias_variable(name, shape):
"""Initialize bias variable
Parameter
---------
        name : str
            The name of the variable to create.
        shape : list
The shape of the initialized value.
Returns
-------
The created `tf.get_variable` for biases.
"""
initial_value = tf.constant([0.1], shape=shape)
return tf.get_variable(name=name, initializer=initial_value)
@staticmethod
def variable_summaries(var):
with tf.name_scope('summaries'):
mean = tf.reduce_mean(var)
tf.summary.scalar('mean', mean)
with tf.name_scope('stddev'):
stddev = tf.sqrt(tf.reduce_mean(tf.square(var - mean)))
tf.summary.scalar('stddev', stddev)
tf.summary.scalar('max', tf.reduce_max(var))
tf.summary.scalar('min', tf.reduce_min(var))
tf.summary.histogram('histogram', var)
@staticmethod
def save_labels(predictions, actual, result_path, phase, step):
"""Saves the actual and predicted labels to a NPY file
Parameter
---------
predictions : numpy.ndarray
The NumPy array containing the predicted labels.
actual : numpy.ndarray
The NumPy array containing the actual labels.
result_path : str
The path where to save the concatenated actual and predicted labels.
step : int
The time step for the NumPy arrays.
phase : str
The phase for which the predictions is, i.e. training/validation/testing.
"""
if not os.path.exists(path=result_path):
os.mkdir(result_path)
# Concatenate the predicted and actual labels
labels = np.concatenate((predictions, actual), axis=1)
# save every labels array to NPY file
np.save(file=os.path.join(result_path, '{}-mlp-{}.npy'.format(phase, step)), arr=labels)
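# Hedged usage sketch (illustrative addition, not part of the original file): one way the
# MLP class above might be driven. The hyperparameters and the train/test arrays are
# made-up assumptions -- substitute real feature arrays of shape [num_examples, num_features]
# and integer label arrays of shape [num_examples].
#
# model = MLP(alpha=1e-3, batch_size=256, node_size=[512, 256, 128],
#             num_classes=2, num_features=24)
# model.train(num_epochs=10, log_path='./logs/', result_path='./results/',
#             train_data=[train_features, train_labels], train_size=train_labels.shape[0],
#             test_data=[test_features, test_labels], test_size=test_labels.shape[0])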
| {"hexsha": "6aa241ad753743224a19af45b1f408efa16b8f60", "size": 11178, "ext": "py", "lang": "Python", "max_stars_repo_path": "MLP.py", "max_stars_repo_name": "modi975/multilayer-perceptron", "max_stars_repo_head_hexsha": "4cfc9bd79ec145f6ecd2c5f1c12e9df7156e5e70", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MLP.py", "max_issues_repo_name": "modi975/multilayer-perceptron", "max_issues_repo_head_hexsha": "4cfc9bd79ec145f6ecd2c5f1c12e9df7156e5e70", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MLP.py", "max_forks_repo_name": "modi975/multilayer-perceptron", "max_forks_repo_head_hexsha": "4cfc9bd79ec145f6ecd2c5f1c12e9df7156e5e70", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.4, "max_line_length": 120, "alphanum_fraction": 0.5966183575, "include": true, "reason": "import numpy", "num_tokens": 2269} |
"""
Created on 2019-02-01 22:11:43
@author: George Kyriakides
ge.kyriakides@gmail.com
"""
import copy
from typing import List
import numpy as np
from .genome import Genome
def get_distance(g1: Genome, g2: Genome):
c_1 = 1
c_2 = 1
N = 1
innovations_1, innovations_2 = [], []
innovations_1.extend(g1.connections.index)
innovations_1.extend(g1.nodes.index)
innovations_2.extend(g2.connections.index)
innovations_2.extend(g2.nodes.index)
# Get the max innovation of the two genomes
max_innovation_1 = max(innovations_1)
max_innovation_2 = max(innovations_2)
# Get the minimum of the above
min_innovation = min(max_innovation_1, max_innovation_2)
total_1 = len(innovations_1)
total_2 = len(innovations_2)
# Swap so innovations_1 has min_innovation
if min_innovation == max_innovation_2:
tmp = innovations_1
innovations_1 = innovations_2
innovations_2 = tmp
tmp = total_1
total_1 = total_2
total_2 = tmp
# Excess and disjoint
E = 0
D = 0
for i in innovations_1:
# Homologous
if i in innovations_2:
innovations_2.remove(i)
total_2 -= 1
else:
D += 1
total_1 -= 1
for i in sorted(innovations_2):
if i < min_innovation:
D += 1
total_2 -= 1
else:
break
E = total_2
delta = (c_1*E + c_2*D) / N
return delta
def sharing_f(delta: float):
threshold = 4
if delta > threshold:
return 0
return 1
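# Illustrative note (added commentary, not original code): get_distance() above corresponds to
# the NEAT compatibility distance delta = (c_1*E + c_2*D)/N with c_1 = c_2 = N = 1, where D
# counts disjoint (unmatched, within-range) innovations and E counts the unmatched innovations
# beyond the smaller genome's largest innovation number. sharing_f() is the binary sharing
# kernel: e.g. a pair of genomes with E = 3 and D = 2 gives delta = (3 + 2)/1 = 5 > 4, so
# sharing_f() returns 0 and the pair ends up in different species.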
def get_distance_matrix(pop: List[Genome]):
pop_size = len(pop)
matrix = np.zeros((pop_size, pop_size))
for i in range(pop_size):
for j in range(i+1, pop_size):
d = get_distance(pop[i], pop[j])
matrix[i][j] = d
matrix[j][i] = d
return matrix
class SpeciesPopulations(object):
def __init__(self, population_size: int, crossover_rate: float):
self.species = list()
self.population_size = population_size
self.reproduction_sizes = []
self.crossover_rate = crossover_rate
def update_species(self, pop: List[Genome]):
for s in self.species:
s.population = []
# count = 0
for g in pop:
# print(count)
# count += 1
found = False
for i in range(len(self.species)):
species = self.species[i]
if species.attempt_append(g):
found = True
break
if not found:
s = Species(g)
self.species.append(s)
for s in self.species:
s.share_fitness()
self.reproduction_sizes = self.__get_reproduction_sizes()
for i in range(len(self.species)):
# print('species', i, len(self.species))
self.species[i].reproduce(self.reproduction_sizes[i],
self.crossover_rate)
def get_all_individuals(self):
individuals = []
for s in self.species:
individuals.extend(s.population)
return individuals
def __get_reproduction_sizes(self):
fitness_sum = sum([s.total_fitness for s in self.species])
reproduction_sizes = [
int(np.floor(self.population_size*s.total_fitness/fitness_sum)) for s in self.species]
return reproduction_sizes
class Species(object):
def __init__(self, g: Genome):
self.representative = g
self.population = [g]
self.total_fitness = 0
def attempt_append(self, g: Genome):
if sharing_f(get_distance(g, self.representative)) == 1:
self.population.append(g)
return True
return False
def share_fitness(self):
self.total_fitness = 0
sz = len(self.population)
# count = 0
for g in self.population:
# print('share', count)
# count += 1
g.connections.fitness = g.connections.fitness/sz
g.nodes.fitness = g.nodes.fitness/sz
self.total_fitness += (g.connections.fitness + g.nodes.fitness)
if len(self.population) > 0:
self.representative = np.random.choice(self.population)
def reproduce(self, new_size: int, crossover_rate: float):
if len(self.population) > 0:
# Crossover - Mutate
new_genomes = []
for j in range(new_size):
# print(j, 'size:', new_size)
a = self.tournament_selection()
if np.random.uniform() < crossover_rate:
# print('Crossover')
b = self.tournament_selection()
g = a.crossover(b)
else:
# print('Mutate')
g = copy.deepcopy(a)
# print('Done')
# print(g)
g.mutate()
# print('Append')
new_genomes.append(g)
self.population = new_genomes
def tournament_selection(self):
tournament_pc = 0.5
pressure = 0.8
tournament_sz = max(
int(np.floor(len(self.population) * tournament_pc)), 1)
t = np.random.choice(self.population, size=tournament_sz)
fs = [p.connections.fitness for p in t]
ranks = np.argsort(fs)
place = np.random.uniform()
cumm_p = 0
for i in range(tournament_sz):
cumm_p += pressure * ((1-pressure)**i)
if place < cumm_p:
return t[ranks[i]]
return t[ranks[0]]
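# Hedged usage sketch (illustrative addition, not part of the original file): a typical
# generation loop, assuming `population` is a list of Genome objects whose
# connections.fitness / nodes.fitness fields were evaluated elsewhere beforehand.
#
# species_pops = SpeciesPopulations(population_size=50, crossover_rate=0.5)
# for generation in range(100):
#     # ... evaluate the fitness of every genome in `population` here ...
#     species_pops.update_species(population)         # speciate, share fitness, reproduce
#     population = species_pops.get_all_individuals()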
| {"hexsha": "18cd280104416c3b260d802e6c2cb6dd765fbaee", "size": 5906, "ext": "py", "lang": "Python", "max_stars_repo_path": "nord/design/metaheuristics/genetics/neat/speciation.py", "max_stars_repo_name": "GeorgeKyriakides/nord", "max_stars_repo_head_hexsha": "94f4d6503dfe2ed9aaebc9e02d55aaba81c02994", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 15, "max_stars_repo_stars_event_min_datetime": "2018-10-23T05:48:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-20T18:34:21.000Z", "max_issues_repo_path": "nord/design/metaheuristics/genetics/neat/speciation.py", "max_issues_repo_name": "SoftwareImpacts/SIMPAC-2020-55", "max_issues_repo_head_hexsha": "c200dddf21cb830ec980ade213ce1b9d9add6cc5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-10-27T15:59:30.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:49:12.000Z", "max_forks_repo_path": "nord/design/metaheuristics/genetics/neat/speciation.py", "max_forks_repo_name": "GeorgeKyriakides/nord", "max_forks_repo_head_hexsha": "94f4d6503dfe2ed9aaebc9e02d55aaba81c02994", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-10-28T09:03:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-16T21:35:08.000Z", "avg_line_length": 26.9680365297, "max_line_length": 99, "alphanum_fraction": 0.5386048087, "include": true, "reason": "import numpy", "num_tokens": 1346} |
data = split(
"""
.#..##.###...#######
##.############..##.
.#.######.########.#
.###.#######.####.#.
#####.##.#.##.###.##
..#####..#.#########
####################
#.####....###.#.#.##
##.#################
#####.##.###..####..
..######..##.#######
####.##.####...##..#
.#####..#.######.###
##...#.##########...
#.##########.#######
.####.#.###.###.#.##
....##.##.###..#####
.#.#.###########.###
#.#.#.#####.####.###
###.##.####.##.#..##
""", "\n")
data = split(
"""
......#.#.
#..#.#....
..#######.
.#.#.###..
.#..#.....
..#....#.#
#..#....#.
.##.#..###
##...#..#.
.#....####
""", "\n")
function maketuples(data)
a = Vector{Tuple{Int, Int}}()
for (i, line) in enumerate(data)
for j in findall(==("#"), split(line, ""))
push!(a, (i,j))
end
end
a
end
function countseen(a::Vector, i)
seen = Set{Tuple{Float64, Float64}}()
for asts in a
asts == i && continue
dir = asts .- i
dirp = dir[1] == 0 ? dir ./ abs(dir[2]) : dir ./ abs(dir[1])
push!(seen, dirp)
end
length(seen)
end
data = readlines("data/Dec10_data.txt")
a = maketuples(data)
t = countseen(a, a[1])
findmax(map(i->countseen(a, i), a))
using DataStructures
function seenDict(a::Vector, i)
seen = MultiDict{Tuple{Float64, Float64}, Tuple{Int, Int}}()
for asts in a
asts == i && continue
dir = asts .- i
dirp = dir[1] == 0 ? dir ./ abs(dir[2]) : dir ./ abs(dir[1])
insert!(seen, dirp, dir)
end
seen
end
function shootstars(myast, a)
ret = Tuple{Int, Int}[]
sd = seenDict(a, myast)
for (k,v) in sd
sort!(v, by = x->sum((x .- myast).^2), rev = true)
end
sortedkeys = sort(collect(keys(sd)), by = x->atan(x[2], x[1]), rev = true)
counter = 0
for key in sortedkeys
if !isempty(sd[key])
p = pop!(sd[key])
push!(ret, p .+ myast)
(counter += 1) == 200 && return p .+ myast .- (1,1)
end
end
end
res = shootstars(a[353], a)
res[2]*100 + res[1]
| {"hexsha": "a719ce5e82513b7e7181636187a12fb753256e96", "size": 2029, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Dec10.jl", "max_stars_repo_name": "mkborregaard/AdventOfCode2019", "max_stars_repo_head_hexsha": "189c8fa58b76c50923d9ebc8fd5e8949f6caa92a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Dec10.jl", "max_issues_repo_name": "mkborregaard/AdventOfCode2019", "max_issues_repo_head_hexsha": "189c8fa58b76c50923d9ebc8fd5e8949f6caa92a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Dec10.jl", "max_forks_repo_name": "mkborregaard/AdventOfCode2019", "max_forks_repo_head_hexsha": "189c8fa58b76c50923d9ebc8fd5e8949f6caa92a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.1354166667, "max_line_length": 78, "alphanum_fraction": 0.382454411, "num_tokens": 693} |
import os
import os.path
import sys
import collections
import copy
import numpy as np
import pylab as pl
import numbers
try:
# py2.x
from urllib import pathname2url
from urllib import url2pathname
from urllib import quote
from urllib import unquote
from urlparse import urlparse
from urlparse import urlunparse
from urlparse import urljoin
pass
except ImportError:
# py3.x
from urllib.request import pathname2url
from urllib.request import url2pathname
from urllib.parse import quote
from urllib.parse import unquote
from urllib.parse import urlparse
from urllib.parse import urlunparse
from urllib.parse import urljoin
pass
import greensconvolution
from .sourcevecs import build_flash_source
from .sourcevecs import definereflectors
from .sourcevecs import NaN_in_sourcevecs
from .sourcevecs import build_all_source_vecs
from .regularization import apply_tikhonov_regularization
#from .regularization import apply_regularizer
from .tile_rectangle import build_tiled_rectangle
from .fillholes import fillholes_flat
from .grid import build_gi_grid
try:
import pyopencl as cl
pass
except:
cl=None
pass
class NotANumberError(Exception):
pass
def timelimitmatrix(mtx,ny,nx,trange,timelimit):
nt=trange.shape[0]
# full array WAS indexed by y,x,and time
# NOW indexed by time,y,x
#mtxfull=mtx.reshape(ny,nx,nt,mtx.shape[1]);
mtxfull=mtx.reshape(nt,ny,nx,mtx.shape[1]);
timeselect=trange < timelimit
if np.count_nonzero(timeselect) == 0:
raise ValueError("Did not find any frames suitable for performing inversion of layer. Perhaps you should eliminate your shallowest layer and/or discard fewer frames after the initial flash")
#mtxreduced=mtxfull[:,:,timeselect,:]
mtxreduced=mtxfull[timeselect,:,:,:]
#timeselectmtx=np.ones((ny,nx,1),dtype='d')*timeselect.reshape(1,1,nt)
timeselectmtx=np.ones((1,ny,nx),dtype='d')*timeselect.reshape(nt,1,1)
newlength=ny*nx*np.count_nonzero(timeselect)
t_amount = (trange[timeselect][-1]-trange[timeselect][0]) * timeselect.shape[0]*1.0/(timeselect.shape[0]-1)
# print newlength
# print mtxreduced.shape
# print mtx.shape
# print timeselectmtx.shape
return (mtxreduced.reshape(newlength,mtx.shape[1]),timeselectmtx.reshape(mtx.shape[0]).astype(np.bool_),t_amount)
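# Illustrative shape sketch (added commentary, not part of the original module):
# timelimitmatrix() expects mtx with nt*ny*nx rows, ordered time-major (frame index varies
# slowest), and keeps only the frames with trange < timelimit. The sizes below are made-up
# assumptions chosen purely to show the bookkeeping.
def _timelimitmatrix_shape_sketch():
    (nt, ny, nx, ncols) = (5, 3, 4, 7)
    trange = np.arange(nt, dtype='d')*0.1   # frame times 0.0 ... 0.4 s
    mtx = np.zeros((nt*ny*nx, ncols), dtype='d')
    (reduced, rowselect, t_amount) = timelimitmatrix(mtx, ny, nx, trange, 0.25)
    assert reduced.shape == (3*ny*nx, ncols)  # 3 frames (0.0, 0.1, 0.2 s) survive
    assert rowselect.shape == (nt*ny*nx,)     # boolean mask over all rows of mtx
    return t_amount                           # (0.2-0.0)*nt/(nt-1) = 0.25 s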
def generateinversionsteps(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths):
rowselects=[]
inversions=[]
inversionsfull=[]
inverses=[]
nresults=[]
# We go from the shallowest (last) entry to the first (deepest) depth
prevsourcevecs=flashsourcevecs
prevcolumnscaling = flashsourcecolumnscaling
# NOTE: reflector source vectors and column scaling are bottom (farthest) up.
# ... we will construct our matrix from top (nearest) down
# hence iterating backwards through reflectorsourcevecs and reflectorcolumnscaling
for cnt in range(len(depths)):
reflectorcnt=len(depths)-cnt-1
thesesourcevecs=reflectorsourcevecs[reflectorcnt]
thesecolumnscaling=reflectorcolumnscaling[reflectorcnt]
tstar=tstars[reflectorcnt]
if reflectorcnt==0:
# last entry... nresult covers both prev and current
nresult=prevsourcevecs.shape[1]+thesesourcevecs.shape[1]
pass
else:
# regular entry... nresult covers only prev
nresult=prevsourcevecs.shape[1]
pass
fullmatrix=np.concatenate((prevsourcevecs,thesesourcevecs),axis=1)
fullcolumnscaling=np.concatenate((prevcolumnscaling,thesecolumnscaling))
(inversion,rowselect,t_amount)=timelimitmatrix(fullmatrix,ny,nx,trange,tstar*2.0) # see also greensinversionstep definition of endframe
# (no longer) scale rows by t_amount to represent that
# s*V*x = U'*b where LHS is an integral
# over space (layer by layer)
# and RHS is an integral over space and time
# but we want to normalize the integration per
# unit length in x and t so as to make
# tikhonov parameter invariant.
# The row_scaling represents the dx*dy*dt
# of the time integral, but we (no longer) also need
# then to divide by total time of this particular
# calculation, which is t_amount
sys.stderr.write("Entering SVD; shape=%s\n" % (str(inversion.shape)))
(u,s,v)=np.linalg.svd(inversion,full_matrices=False) # inversion/t_amount
# note v here is already transposed so u*s*v = inversion
sys.stderr.write("Finished SVD; shape=%s\n" % (str(inversion.shape)))
# Scale u and v according to row scaling and column scaling
# sourcevecs were multiplied by row_scaling/column_scaling
# so that A_scaled (column_scaling*x) = b*row_scaling
# or A_scaled = A*row_scaling/column_scaling
# dividing u by row scaling
# and multiplying columns of v by column scaling
# Would make u*s*v the equivalent of the unscaled matrix.
# But that is not what we will use u and v for...
# Instead ut s, and vt form the inverse:
# vt * sinv * ut: where x = vt * sinv * ut * b
# We want this to apply to an unscaled vector b
# and give an unscaled result x
# So we need to scale the columns of ut (rows of u)
# by multiplying by row_scaling
# and scale the rows of vt (columns of v)
# by dividing by column_scalng
# note that u_scaled and v_scaled are no longer orthogonal matrices
u_scaled = u*rowscaling #/t_amount
v_scaled = v / fullcolumnscaling[np.newaxis,:]
#filter_factors = tikhonov_regularization(u, s, v, tikparam)
#inverse = apply_regularizer(u, s, v, filter_factors)
#inverse=np.dot(v.T*(1.0/s.reshape(1,s.shape[0])),u.T)
rowselects.append(rowselect)
inversions.append(inversion*(fullcolumnscaling[np.newaxis,:]/rowscaling)) # *t_amount
inversionsfull.append(fullmatrix*(fullcolumnscaling[np.newaxis,:]/rowscaling)) # *t_amount
inverses.append([u_scaled, s, v_scaled])
nresults.append(nresult)
prevsourcevecs=thesesourcevecs
prevcolumnscaling=thesecolumnscaling
pass
return (rowselects,inversions,inversionsfull,inverses,nresults)
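# Illustrative check (added commentary, not part of the original module): the scaled factors
# stored in `inverses` satisfy  x = v_scaled.T @ ((u_scaled.T @ b)/s)  for an *unscaled*
# right-hand side b, which is exactly the form used by performinversionsteps() below. A tiny
# synthetic demonstration with made-up scalings:
def _scaled_svd_inverse_sketch():
    rng = np.random.RandomState(0)
    A = rng.rand(6, 4)                              # unscaled forward matrix
    rowscaling = 0.5
    columnscaling = np.array([1.0, 2.0, 4.0, 8.0])
    A_scaled = A*rowscaling/columnscaling[np.newaxis, :]
    (u, s, v) = np.linalg.svd(A_scaled, full_matrices=False)
    u_scaled = u*rowscaling
    v_scaled = v/columnscaling[np.newaxis, :]
    b = rng.rand(6)
    x = np.dot(v_scaled.T, np.dot(u_scaled.T, b)/s)
    # identical (to numerical precision) to applying the pseudoinverse of the unscaled matrix
    assert np.allclose(x, np.dot(np.linalg.pinv(A), b))
    return x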
r"""
def Get_OpenCL_Context():
OpenCL_CTX=None
if cl is None:
raise ValueError("Exception importing pyopencl (pyopencl is required for OpenCL support)")
# First search for first GPU platform
platforms = cl.get_platforms()
for platform in platforms:
platform_devices=platform.get_devices()
has_double_gpu=[bool(device.type & cl.device_type.GPU) and device.preferred_vector_width_double > 0 for device in platform_devices]
if np.any(has_double_gpu):
OpenCL_CTX = cl.Context(
#dev_type=cl.device_type.GPU,
devices=np.array(platform_devices,dtype='O')[np.array(has_double_gpu,dtype=np.bool)])
# properties=[(cl.context_properties.PLATFORM, platform)]
#self.OpenCL_Platform=platform
#self.figure_out_version()
pass
pass
if OpenCL_CTX is None:
# fall back to a CPU platform
for platform in platforms:
platform_devices=platform.get_devices()
has_double_cpu=[bool(device.type & cl.device_type.CPU) and device.preferred_vector_width_double > 0 for device in platform_devices]
if np.any(has_double_cpu):
OpenCL_CTX = cl.Context(
dev_type=cl.device_type.CPU,
devices=platform_devices[np.where(has_double_gpu)])
#OpenCL_Platform=platform
#self.figure_out_version()
pass
pass
pass
return OpenCL_CTX
"""
performinversionkernel=r"""
typedef unsigned long uint64_t;
typedef long int64_t;
__kernel void dodot(__global const double *matrix,
__global const double *vector,
__global double *outvec,
uint64_t firstvecrow,
uint64_t sumsize,
uint64_t matrix_ncols)
{
size_t gid=get_global_id(0); /* gid is which row of the matrix/outvec we are operating on */
size_t start_of_row = matrix_ncols*gid;
size_t cnt;
double result=0.0;
for (cnt=0; cnt < sumsize; cnt++) {
result += matrix[ start_of_row + cnt ]*vector[firstvecrow+cnt];
}
outvec[gid]=result;
}
__kernel void dodot_extrafactor(__global const double *matrix,
__global const double *vector,
__global double *outvec,
__global const double *extrafactor,
uint64_t firstvecrow,
uint64_t sumsize,
uint64_t matrix_ncols)
/* multiply matrix*vector -> outvec, with an element-by-element multiply of
outvec by an extra factor */
{
size_t gid=get_global_id(0); /* gid is which row of the matrix/outvec we are operating on */
size_t start_of_row = matrix_ncols*gid;
size_t cnt;
double result=0.0;
for (cnt=0; cnt < sumsize; cnt++) {
result += matrix[ start_of_row + cnt ]*vector[firstvecrow+cnt];
}
outvec[gid]=result*extrafactor[gid];
//outvec[gid]=matrix[matrix_ncols*gid];
}
__kernel void dodot_subtractfrom(__global const double *matrix,
__global const double *vector,
__global double *outvec,
uint64_t firstvecrow,
uint64_t sumsize,
uint64_t matrix_ncols)
{
/* dot matrix*vector, subtract from outvec */
size_t gid=get_global_id(0); /* gid is which row of the matrix/outvec we are operating on */
size_t start_of_row = matrix_ncols*gid;
size_t cnt;
double result=0.0;
for (cnt=0; cnt < sumsize; cnt++) {
result += matrix[ start_of_row + cnt ]*vector[firstvecrow+cnt];
}
outvec[gid]-=result; /* WARNING: Non-atomic... make sure nothing else might be messing with this entry!!! */
}
"""
class queuelist(list):
def __init__(self,simplelist):
super(queuelist,self).__init__(simplelist)
pass
def __enter__(self):
for element in self:
element.__enter__()
return self
def __exit__(self,type,value,traceback):
for element in self:
element.__exit__(type,value,traceback)
pass
pass
pass
def parallelperforminversionsteps(OpenCL_CTX,rowselects,inversions,inversionsfull,inverses,nresults,inputmats, tikparams,GPU_Full_Inverse=False):
if cl is None:
raise ValueError("greensinversion.parallelperforminversionsteps: Failed to import PyOpenCL")
n_inputs = len(inputmats)
if not isinstance(tikparams,collections.Sequence) and not isinstance(tikparams,np.ndarray):
# single tikparam... broadcast it over all steps
tikparams = [ tikparams ]*len(rowselects)
pass
tikparams_list = [ copy.copy(tikparams) for inpcnt in range(n_inputs) ]
inversioncoeffs_list=[ [] for inpcnt in range(n_inputs) ]
errs_list=[ [] for inpcnt in range(n_inputs) ]
opencl_dodot = cl.Program(OpenCL_CTX,performinversionkernel).build()
opencl_dodot_function=opencl_dodot.dodot
opencl_dodot_function.set_scalar_arg_dtypes([ None, None, None,
np.uint64, np.uint64, np.uint64 ])
opencl_dodot_subtractfrom_function=opencl_dodot.dodot_subtractfrom
opencl_dodot_subtractfrom_function.set_scalar_arg_dtypes([ None, None, None,
np.uint64, np.uint64, np.uint64 ])
opencl_dodot_extrafactor_function=opencl_dodot.dodot_extrafactor
opencl_dodot_extrafactor_function.set_scalar_arg_dtypes([ None, None, None, None,
np.uint64, np.uint64, np.uint64 ])
residuals = [ np.array(inputmat.reshape(np.prod(inputmat.shape)),dtype='d',order="C") for inputmat in inputmats ]
res_buffers = [ cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR,hostbuf=residual) for residual in residuals ]
with queuelist([ cl.CommandQueue(OpenCL_CTX,properties=greensconvolution.greensconvolution_calc.OpenCL_GetOutOfOrderDeviceQueueProperties(OpenCL_CTX)) for inpcnt in range(n_inputs) ]) as queue:
lastiter_wait_events=[()]*n_inputs
for cnt in range(len(rowselects)):
rowselect=rowselects[cnt]
# assume row selection is contiguous (it is)
rowselectstart=np.where(rowselect)[0][0]
rowselectnum=np.where(rowselect)[0][-1]+1-rowselectstart
inversion=inversions[cnt]
inversionfull=inversionsfull[cnt]
(ui, si, vi) = inverses[cnt]
# WARNING: ui, vi have been scaled (see generateinversionsteps()) so they are no longer orthogonal matrices!!!
# better not to form the inverse just once here,
# because regularization could be different in each case
# (should it be???)
uitranspose_contiguous = np.ascontiguousarray(ui.T,dtype='d')
uitranspose_buffer = cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,hostbuf=uitranspose_contiguous)
vitranspose_contiguous = np.ascontiguousarray(vi.T,dtype='d')
vitranspose_buffer = cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,hostbuf=vitranspose_contiguous)
# storage for residual[rowselect] that we extract to determine err
residualrowselects = [ np.empty(rowselectnum,dtype='d',order="C") for inpcnt in range(n_inputs) ]
inverse_sis = []
inverse_si_buffers = []
for inpcnt in range(n_inputs):
tikparam=tikparams_list[inpcnt][cnt]
if tikparam is None:
# tikhonov regularization disabled
#inverse=np.dot(vi.T*(1.0/si.reshape(1,si.shape[0])),ui.T)
inverse_sis.append(np.array(1.0/si,dtype='d',order='C'))
inverse_si_buffers.append(cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY|cl.mem_flags.COPY_HOST_PTR,hostbuf=inverse_sis[inpcnt]))
pass
else:
assert(isinstance(tikparam,numbers.Number))
usetikparam=tikparam
tikparams_list[inpcnt][cnt]=usetikparam
# inverse = apply_tikhonov_regularization(ui, si, vi, usetikparam)
d = si/(si**2+(usetikparam)**2) # Tikhonov parameter interpreted deg K NETD * m^2 of depth / J/m^2 of source intensity
inverse_sis.append(np.array(d,dtype='d',order='C'))
inverse_si_buffers.append(cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY|cl.mem_flags.COPY_HOST_PTR,hostbuf=inverse_sis[inpcnt]))
# inverse = np.dot(v.T*(d.reshape(1,d.shape[0])),u.T)
pass
pass
nresult=nresults[cnt]
# Could probably optimize here a bit by using EnqueueCopyBuffer() rather than COPY_HOST_PTR...
#inverse_contiguous = np.ascontiguousarray(inverse)
#inverse_buffer = cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,hostbuf=inverse_contiguous)
# buffer to hold sinverse * utranspose
sut_buffers = [ cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_WRITE,size=inverse_sis[inpcnt].nbytes) for inpcnt in range(n_inputs) ]
inversion_contiguous = np.ascontiguousarray(inversion)
inversion_buffer = cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,hostbuf=inversion_contiguous)
if GPU_Full_Inverse:
inversionfull_contiguous = np.ascontiguousarray(inversionfull)
inversionfull_buffer = cl.Buffer(OpenCL_CTX,cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR,hostbuf=inversionfull_contiguous)
pass
bestfits = [ np.empty(vitranspose_contiguous.shape[0],dtype='d',order="C") for inpcnt in range(n_inputs) ]
bestfit_buffers = [ cl.Buffer(OpenCL_CTX,cl.mem_flags.WRITE_ONLY,size=bestfits[inpcnt].nbytes) for inpcnt in range(n_inputs) ]
reconstructeds = [ np.empty(inversion.shape[0],dtype='d',order="C") for inpcnt in range(n_inputs) ]
reconstructed_buffers = [ cl.Buffer(OpenCL_CTX,cl.mem_flags.WRITE_ONLY,size=reconstructeds[inpcnt].nbytes) for inpcnt in range(n_inputs) ]
#bestfit=np.dot(inverse,residuals[inpcnt][rowselect])
# multiply utranspose by vector, multiply result by inverse_si
ut_events= [ opencl_dodot_extrafactor_function(queue[inpcnt],(uitranspose_contiguous.shape[0],),None,
uitranspose_buffer,
res_buffers[inpcnt],
sut_buffers[inpcnt],
inverse_si_buffers[inpcnt],
rowselectstart,
rowselectnum,
uitranspose_contiguous.shape[1],
wait_for=lastiter_wait_events[inpcnt])
for inpcnt in range(n_inputs) ]
[ queue[inpcnt].flush() for inpcnt in range(n_inputs) ] # Get computation going
bestfit_events= [ opencl_dodot_function(queue[inpcnt],(vitranspose_contiguous.shape[0],),None,
vitranspose_buffer,
sut_buffers[inpcnt],
bestfit_buffers[inpcnt],
0,
vitranspose_contiguous.shape[1],
vitranspose_contiguous.shape[1],
wait_for=(ut_events[inpcnt],))
for inpcnt in range(n_inputs) ]
[ queue[inpcnt].flush() for inpcnt in range(n_inputs) ] # Get computation going
# get result copying
bestfit_enqueue_events=[ cl.enqueue_copy(queue[inpcnt],bestfits[inpcnt],bestfit_buffers[inpcnt],wait_for=(bestfit_events[inpcnt],),is_blocking=False) for inpcnt in range(n_inputs) ]
# reconstructed=np.dot(inversion,bestfit)
reconstructed_events= [ opencl_dodot_function(queue[inpcnt],(inversion.shape[0],),None,
inversion_buffer,
bestfit_buffers[inpcnt],
reconstructed_buffers[inpcnt],
0,
inversion.shape[1],
inversion.shape[1],
wait_for=(bestfit_events[inpcnt],))
for inpcnt in range(n_inputs) ]
[ queue[inpcnt].flush() for inpcnt in range(n_inputs) ] # Get computation going
# get result copying
reconstructed_enqueue_events=[ cl.enqueue_copy(queue[inpcnt],reconstructeds[inpcnt],reconstructed_buffers[inpcnt],wait_for=(reconstructed_events[inpcnt],),is_blocking=False) for inpcnt in range(n_inputs) ]
# get residuals[inpcnt][rowselect] also copying so we can look at the residual
# Is it worth using an OpenCL kernel to subtract the two?
residualrowselect_enqueue_events=[ cl.enqueue_copy(queue[inpcnt],residualrowselects[inpcnt],res_buffers[inpcnt],wait_for=lastiter_wait_events[inpcnt],device_offset=rowselectstart*residualrowselects[inpcnt].dtype.itemsize,is_blocking=False) for inpcnt in range(n_inputs) ]
# observe change in residual
#residual=residual-np.dot(inversionfull[:,:nresult],bestfit[:nresult])
if GPU_Full_Inverse:
residual_events= [ opencl_dodot_subtractfrom_function(queue[inpcnt],(inversionfull.shape[0],),None,
inversionfull_buffer,
bestfit_buffers[inpcnt],
res_buffers[inpcnt],
0,
nresult,
inversionfull.shape[1],
wait_for=(bestfit_events[inpcnt],))
for inpcnt in range(n_inputs) ]
lastiter_wait_events = [ (residual_event,) for residual_event in residual_events ] # list of events to wait for at start of next iteration
pass
else:
# Do the full inverse with the CPU, presumably because the GPU doesn't have enough memory to store it
# Wait for our bestfit data to be copied into place
[ event.wait() for event in bestfit_enqueue_events ]
                residual_update_copy_enqueue_events=[]
                for inpcnt in range(n_inputs):
                    if np.isnan(bestfits[inpcnt][:nresult]).any():
                        raise ValueError("Got NAN!")
residuals[inpcnt] -= np.dot(inversionfull[:,:nresult],bestfits[inpcnt][:nresult])
residual_update_copy_enqueue_events.append((cl.enqueue_copy(queue[inpcnt],res_buffers[inpcnt],residuals[inpcnt],wait_for=(residualrowselect_enqueue_events[inpcnt],),is_blocking=False),))
pass
lastiter_wait_events=[]
lastiter_wait_events.extend(residual_update_copy_enqueue_events)
pass
# Wait for our bestfit data to be copied into place
[ event.wait() for event in bestfit_enqueue_events ]
# bestfits numpy arrays are now legitimate
# print nresults,bestfit.shape
# print " "
# print " "
# inversioncoeffs.extend(list(bestfit[:nresult]))
[ inversioncoeffs_list[inpcnt].extend(list(bestfits[inpcnt][:nresult])) for inpcnt in range(n_inputs) ]
# wait for reconstruction to become available, so we can evaluate the error
[ event.wait() for event in reconstructed_enqueue_events ]
[ event.wait() for event in residualrowselect_enqueue_events ]
# reconstructed and residualrowselect arrays are now available
# err=np.linalg.norm(reconstructed-residual[rowselect])
[ errs_list[inpcnt].append(np.linalg.norm(reconstructeds[inpcnt]-residualrowselects[inpcnt])) for inpcnt in range(n_inputs) ]
#print inversion.shape
#print bestfit.shape
#print nresult
#print residual.shape
[ reconstructed_buffer.release() for reconstructed_buffer in reconstructed_buffers ]
[ bestfit_buffer.release() for bestfit_buffer in bestfit_buffers ]
if GPU_Full_Inverse:
inversionfull_buffer.release()
pass
inversion_buffer.release()
#inverse_buffer.release()
[ sut_buffer.release() for sut_buffer in sut_buffers ]
[ inverse_si_buffer.release() for inverse_si_buffer in inverse_si_buffers ]
pass
[ queue[inpcnt].finish() for inpcnt in range(n_inputs) ]
pass
# convert elements in inversion_coeffs_list to arrays
inversioncoeffs_list=[ np.array(invcoeffs,dtype='d') for invcoeffs in inversioncoeffs_list ]
return (inversioncoeffs_list,errs_list,tikparams_list)
def performinversionsteps(rowselects,inversions,inversionsfull,inverses,nresults,inputmat, tikparam):
# tikparam: if None, disable regularization
# if a list, use values according to step
# if a number, use that value
inputmat=inputmat.reshape(np.prod(inputmat.shape))
# assert(inputmat.shape[0]=inverses.shape
inversioncoeffs=[]
errs=[]
tikparams=[]
residual=inputmat
for cnt in range(len(rowselects)):
rowselect=rowselects[cnt]
inversion=inversions[cnt]
inversionfull=inversionsfull[cnt]
(ui, si, vi) = inverses[cnt]
if tikparam is None:
# tikhonov regularization disabled
            # NOTE: This next line is probably the slow computation part
# We should just multiply by the inverse components
# rather than doing an (expensive) matrix multiply
#inverse=np.dot(vi.T*(1.0/si.reshape(1,si.shape[0])),ui.T)
# bestfit=np.dot(inverse,residual[rowselect])
# Faster:
bestfit = np.dot(vi.T,np.dot(ui.T,residual[rowselect])*(1.0/si))
pass
else:
if isinstance(tikparam,collections.Sequence) or isinstance(tikparam,np.ndarray):
# a list or similar
usetikparam=tikparam[cnt]
pass
else:
assert(isinstance(tikparam,numbers.Number))
usetikparam=tikparam
pass
tikparams.append(usetikparam)
            # NOTE: This next line is probably the slow computation part
bestfit = apply_tikhonov_regularization(ui, si, vi, usetikparam,residual[rowselect])
pass
nresult=nresults[cnt]
reconstructed=np.dot(inversion,bestfit)
err=np.linalg.norm(reconstructed-residual[rowselect])
# print nresults,bestfit.shape
# print " "
# print " "
inversioncoeffs.extend(list(bestfit[:nresult]))
#print inversion.shape
#print bestfit.shape
#print nresult
#print residual.shape
residual=residual-np.dot(inversionfull[:,:nresult],bestfit[:nresult])
errs.append(err)
pass
return (np.array(inversioncoeffs,dtype='d'),residual,errs,tikparams)
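# Hedged usage note (illustrative addition, not part of the original module): the tikparam
# argument of performinversionsteps() accepts three forms --
#   tikparam=None                 -> plain pseudoinverse, no Tikhonov regularization
#   tikparam=2e-9                 -> one regularization parameter reused for every step
#   tikparam=[1e-9, 2e-9, 5e-9]   -> one value per inversion step, in step order
#                                    (shallowest layer first, per generateinversionsteps())
# The numeric values above are made-up placeholders, not recommendations.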
def serialperforminversionsteps(OpenCL_CTX,rowselects,inversions,inversionsfull,inverses,nresults,inputmats, tikparams,GPU_Full_Inverse=False):
# Does not use OpenCL_CTX or GPU_Full_Inverse
n_inputs=len(inputmats)
inversioncoeffs_list=[]
errs_list=[]
tikparams_list=[]
for inpcnt in range(n_inputs):
(inversioncoeffs,residual,errs,tikparams_out)=performinversionsteps(rowselects,inversions,inversionsfull,inverses,nresults,inputmats[inpcnt],tikparams)
inversioncoeffs_list.append(inversioncoeffs)
errs_list.append(errs)
tikparams_list.append(tikparams_out)
pass
return (inversioncoeffs_list,errs_list,tikparams_list)
def generatesinglestepinversion(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths):
# *** NOTE: Should we use timelimitmatrix to use only a limited range of time for our deepest layer rather than using all frames?
# See generateinversionsteps() for more details
# NOTE: reflector source vectors and column scaling are bottom (farthest) up.
# ... we will construct our matrix from top (nearest) down
toconcat=[flashsourcevecs]
toconcat.extend(reflectorsourcevecs[::-1])
columnscaling_toconcat=[flashsourcecolumnscaling]
columnscaling_toconcat.extend(reflectorcolumnscaling[::-1])
inversionall=np.concatenate(toconcat,axis=1)
columnscalingall=np.concatenate(columnscaling_toconcat)
t_amount=(trange[-1]-trange[0])*trange.shape[0]*1.0/(trange.shape[0]-1)
sys.stderr.write("Entering single step SVD; shape=%s\n" % (str(inversionall.shape)))
(uiall,siall,viall)=np.linalg.svd(inversionall,full_matrices=False) # inversionall/t_amount
sys.stderr.write("Completed single step SVD; shape=%s\n" % (str(inversionall.shape)))
uiall_scaled=uiall*rowscaling # /t_amount
viall_scaled=viall / columnscalingall[np.newaxis,:]
# if tikparam is None:
# # tikhonov regularization disabled
# inverseall=np.dot(viall.T*(1.0/siall.reshape(1,siall.shape[0])),uiall.T)
# pass
#else:
# if tikparam==-1:
# usetikparam=generate_tikhonov_parameter()
#filter_factors = tikhonov_regularization(uiall, siall, viall, tikparam)
#inverseall = apply_regularizer(uiall, siall, viall, filter_factors)
#
rowselects=[np.ones(ny*nx*trange.shape[0],dtype=np.bool_)] # all True
inversions=[inversionall*(columnscalingall[np.newaxis,:]/rowscaling)] # t_amount
inversionsfull=[inversions[0]]
inverses=[ [uiall_scaled, siall, viall_scaled], ]
nresults=[inversionall.shape[1]]
return (rowselects,inversions,inversionsfull,inverses,nresults)
def plotabstractinverse(fignum,numplotrows,numplotcols,inversioncoeffs,reflectors,vmin,vmax,y_bnd,x_bnd,num_sources_y,num_sources_x):
fig=pl.figure(fignum)
pl.clf()
inversioncoeffspos=0
subplots=[]
images=[]
for subplotnum in range(1,len(reflectors)+2):
if subplotnum==1:
depth=0.0
ny=num_sources_y
nx=num_sources_x
pass
else:
(depth,ny,nx)=reflectors[len(reflectors)-subplotnum+1]
pass
numampls=ny*nx
subplotcoeffs=inversioncoeffs[inversioncoeffspos:(inversioncoeffspos+numampls)]
# print subplotcoeffs.shape
# print ny,nx
# print inversioncoeffs.shape
subplot=pl.subplot(numplotrows,numplotcols,subplotnum)
image=pl.imshow(subplotcoeffs.reshape(ny,nx),vmin=vmin,vmax=vmax,extent=(x_bnd[0]*1.e3,x_bnd[-1]*1.e3,y_bnd[-1]*1.e3,y_bnd[0]*1.e3))
pl.title('Depth=%f mm' % (depth*1e3))
pl.grid(True)
pl.colorbar()
subplots.append(subplot)
images.append(image)
inversioncoeffspos+=numampls
pass
return (fig,subplots,images)
def savetiledconcreteinverse(filename,fullinverse,reflectors,yvec,xvec,zthick,zequalszero_on_back_surface=False):
# Save thermal data as a netcdf (.nc) file
from netCDF4 import Dataset
rootgrp=Dataset(filename,"w",format="NETCDF4")
ydim=rootgrp.createDimension("z",fullinverse.shape[0])
ydim=rootgrp.createDimension("y",fullinverse.shape[1])
xdim=rootgrp.createDimension("x",fullinverse.shape[2])
zvals=rootgrp.createVariable("z","f8",("z",))
for zcnt in range(len(reflectors)): # reflectors is depth-first
# first element in each reflectors tuple is z-position, measured from the front surface, positive deep
if zequalszero_on_back_surface:
zvals[zcnt]=zthick-reflectors[len(reflectors)-1-zcnt][0]
pass
else:
zvals[zcnt]=reflectors[len(reflectors)-1-zcnt][0]
pass
pass
yvals=rootgrp.createVariable("y","f8",("y",))
yvals[:]=yvec
xvals=rootgrp.createVariable("x","f8",("x",))
xvals[:]=xvec
intensityvals=rootgrp.createVariable("sourceintensity","f8",("z","y","x"))
intensityvals[::]=fullinverse
rootgrp.close()
pass
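# Hedged companion sketch (illustrative addition, not part of the original module): reading back
# a file written by savetiledconcreteinverse() with the same netCDF4 API. The filename is a
# made-up placeholder.
#
# from netCDF4 import Dataset
# rootgrp = Dataset("tiled_inverse.nc", "r")
# z = rootgrp.variables["z"][:]                        # layer depths
# y = rootgrp.variables["y"][:]
# x = rootgrp.variables["x"][:]
# intensity = rootgrp.variables["sourceintensity"][:]  # (z, y, x) source intensity map
# rootgrp.close()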
def buildconcreteinverse(inversioncoeffs,reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx,num_sources_y,num_sources_x):
inversioncoeffspos=0
res=np.zeros((len(reflectors)+1,ny,nx),dtype='d')
for layercnt in range(len(reflectors)+1):
if layercnt==0:
depth=0.0 # front surface: flash_source_vecs
#reflector_ny=2
#reflector_nx=2
# Use flashsourcevecs!
# !!!*** ... Do we need flashsourcecolumn scaling? (Don't think so)
#numampls=flashsourcevecs.shape[1]
numampls=num_sources_y*num_sources_x
pass
else:
(depth,reflector_ny,reflector_nx)=reflectors[len(reflectors)-layercnt]
numampls=reflector_ny*reflector_nx
pass
coeffs=inversioncoeffs[inversioncoeffspos:(inversioncoeffspos+numampls)]
#coeffsshaped=coeffs.reshape(reflector_ny,reflector_nx)
# now expand coeffsshaped into the full (ny,nx) grid
# need to be consistent with build_reflector_source_vecs
if layercnt==0:
# flash_source_vecs naturally overlap
flash_source=build_flash_source(ygrid,xgrid,y_bnd,x_bnd,num_sources_y,num_sources_x)
assert(num_sources_y*num_sources_x == numampls)
# flash_source was (4,ny,nx) (now can be more than 4)
# multiply it by source intensity coefficients
#print coeffs.shape
#print flash_source.shape
res[layercnt,:,:]=np.tensordot(coeffs,flash_source,((0,),(0,)))
pass
else:
# define geometries of each reflector at this depth
(reflector_widthy,
reflector_widthx,
reflector_posy,
reflector_posx,
reflector_ygrid,
reflector_xgrid,
reflector_bndy,
reflector_bndx)=definereflectors(y_bnd,x_bnd,reflector_ny,reflector_nx)
# Iterate over which reflector...
for refl_yidx in range(reflector_ny):
for refl_xidx in range(reflector_nx):
coeffidx=reflector_nx*refl_yidx + refl_xidx
# Determine range of xy points corresponding to this reflector
refl_xygrid=((ygrid[:,:] >= reflector_bndy[refl_yidx]) &
(ygrid[:,:] < reflector_bndy[refl_yidx+1]) &
(xgrid[:,:] >= reflector_bndx[refl_xidx]) &
(xgrid[:,:] < reflector_bndx[refl_xidx+1]))
#print(res.shape)
#print(refl_xygrid.shape)
res[layercnt,refl_xygrid]=coeffs[coeffidx]
pass # end loop reflector_nx
pass # end loop reflector_ny
pass # end layercnt != 0
inversioncoeffspos+=numampls
pass # end loop layercnt
return res # (len(reflectors)+1,ny,nx)... first layer is surface
def plotconcreteinverse(fignum,numplotrows,numplotcols,saturation_map,concreteinverse,reflectors,vmin,vmax,y_bnd,x_bnd,num_sources_y,num_sources_x):
fig=pl.figure(fignum)
pl.clf()
subplots=[]
images=[]
for subplotnum in range(1,concreteinverse.shape[0]+2):
if subplotnum <= 2:
depth=0.0
ny=num_sources_y
nx=num_sources_x
pass
else:
(depth,ny,nx)=reflectors[len(reflectors)-(subplotnum-2)]
pass
subplot=pl.subplot(numplotrows,numplotcols,subplotnum)
if subplotnum==1:
# Show saturation map
image=pl.imshow(saturation_map,extent=(x_bnd[0]*1.e3,x_bnd[-1]*1.e3,y_bnd[-1]*1.e3,y_bnd[0]*1.e3))
pl.title('Saturation map')
pass
else:
image=pl.imshow(concreteinverse[subplotnum-2,:,:]/1.e3,vmin=vmin/1.e3,vmax=vmax/1.e3,extent=(x_bnd[0]*1.e3,x_bnd[-1]*1.e3,y_bnd[-1]*1.e3,y_bnd[0]*1.e3))
pl.title('Depth=%.2f mm' % (depth*1e3))
pass
pl.grid(True)
pl.colorbar()
subplots.append(subplot)
images.append(image)
pass
return (fig,subplots,images)
def plotconcreteinversemovie(startfignum,outdirhref,outfilenametemplate,saturation_map,concreteinverse,reflectors,vmin,vmax,y_bnd,x_bnd,num_sources_y,num_sources_x,**savefigkwargs):
if outdirhref is not None:
from limatix import dc_value
pass
if outdirhref is not None and not os.path.exists(outdirhref.getpath()):
os.mkdir(outdirhref.getpath())
pass
plots=[]
images=[]
plothrefs=[]
depths=[]
for plotnum in range(concreteinverse.shape[0]+1):
fig=pl.figure(startfignum+plotnum)
pl.clf()
if plotnum <= 1:
depth=0.0
ny=num_sources_y
nx=num_sources_x
pass
else:
(depth,ny,nx)=reflectors[len(reflectors)-(plotnum-1)]
pass
#subplot=pl.subplot(numplotrows,numplotcols,subplotnum)
if plotnum==0:
# Show saturation map
image=pl.imshow(saturation_map,extent=(x_bnd[0]*1.e3,x_bnd[-1]*1.e3,y_bnd[-1]*1.e3,y_bnd[0]*1.e3))
pl.title('Saturation map')
pass
else:
image=pl.imshow(concreteinverse[plotnum-1,:,:]/1.e3,vmin=vmin/1.e3,vmax=vmax/1.e3,extent=(x_bnd[0]*1.e3,x_bnd[-1]*1.e3,y_bnd[-1]*1.e3,y_bnd[0]*1.e3))
pl.title('Depth=%.2f mm' % (depth*1e3))
pass
pl.grid(True)
pl.colorbar()
pl.xlabel('Position (mm)')
pl.ylabel('Position (mm)')
if outdirhref is not None:
outfilename=outfilenametemplate % (depth*1e3)
outfilehref=dc_value.hrefvalue(quote(outfilename),contexthref=outdirhref)
outfilepath=outfilehref.getpath()
pl.savefig(outfilepath,**savefigkwargs)
plothrefs.append(outfilehref)
pass
plots.append(fig)
images.append(image)
depths.append(depth)
pass
return (startfignum+concreteinverse.shape[0],plots,images,plothrefs,depths)
def define_curved_inversion(gi_params,gi_grid,obj,curvmat_tile,stepsizemat_tile,curvmat_hires,stepsizemat_hires,curvmat_sizeu,curvmat_sizev,num_sources_y,num_sources_x,singlestep=False):
(rho,c,alphaz,alphaxy,dy,dx,maxy,maxx,t0,dt,nt,reflectors,
trange,greensconvolution_params) = gi_params
(ny,nx,y,x,ygrid,xgrid,y_bnd,x_bnd) = gi_grid
kz=alphaz*rho*c
kx=alphaxy*rho*c
ky=alphaxy*rho*c
#eval_linelength_avgcurvature = lambda u1,v1,u2,v2: obj.implpart.surfaces[0].intrinsicparameterization.linelength_avgcurvature(obj.implpart.surfaces[0],dx,dy,u1,v1,u2,v2)
# eval_linelength_avgcurvature = lambda u1,v1,u2,v2: obj.implpart.surfaces[0].intrinsicparameterization.linelength_avgcurvature_meshbased(obj.implpart.surfaces[0],curvmat_hires,stepsizemat_hires,dx,dy,u1,v1,u2,v2)
eval_linelength_avgcurvature_mirroredbox = lambda boxu1,boxv1,boxu2,boxv2,u1,v1,u2,v2: obj.implpart.surfaces[0].intrinsicparameterization.linelength_avgcurvature_mirroredbox_meshbased(obj.implpart.surfaces[0],curvmat_hires,stepsizemat_hires,obj.implpart.surfaces[0].intrinsicparameterization.lowerleft_meaningfulunits[0],obj.implpart.surfaces[0].intrinsicparameterization.lowerleft_meaningfulunits[1],curvmat_sizeu*1.0/curvmat_hires.shape[1],curvmat_sizev*1.0/curvmat_hires.shape[0],boxu1,boxv1,boxu2,boxv2,dx,dy,u1,v1,u2,v2)
print("Building curved sourcevecs")
(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions)=build_all_source_vecs(greensconvolution_params,dy,dx,ygrid,xgrid,y_bnd,x_bnd,rho,c,kz,ky,kx,dt,trange,reflectors,gc_kernel="opencl_interpolator_curved",eval_linelength_avgcurvature_mirroredbox=eval_linelength_avgcurvature_mirroredbox,curvmat_tile=curvmat_tile,stepsizemat_tile=stepsizemat_tile,num_sources_y=num_sources_y,num_sources_x=num_sources_x)
if NaN_in_sourcevecs([ flashsourcevecs ]):
raise NotANumberError("NaN found in flashsourcevecs")
if NaN_in_sourcevecs(reflectorsourcevecs):
raise NotANumberError("NaN found in reflectorsourcevecs")
if singlestep:
print("Generating single-step curved inversion")
(rowselects,inversions,inversionsfull,inverses,nresults)=generatesinglestepinversion(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)
pass
else:
print("Generating curved inversion steps")
(rowselects,inversions,inversionsfull,inverses,nresults)=generateinversionsteps(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)
pass
return (rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions,rowselects,inversions,inversionsfull,inverses,nresults)
def define_flat_inversion(gi_params,gi_grid,num_sources_y,num_sources_x,singlestep=False):
(rho,c,alphaz,alphaxy,dy,dx,maxy,maxx,t0,dt,nt,reflectors,
trange,greensconvolution_params) = gi_params
(ny,nx,y,x,ygrid,xgrid,y_bnd,x_bnd) = gi_grid
kz=alphaz*rho*c
kx=alphaxy*rho*c
ky=alphaxy*rho*c
print("Building flat sourcevecs")
(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions)=build_all_source_vecs(greensconvolution_params,dy,dx,ygrid,xgrid,y_bnd,x_bnd,rho,c,kz,ky,kx,dt,trange,reflectors,num_sources_y=num_sources_y,num_sources_x=num_sources_x)
if singlestep:
print("Generating single-step flat inversion")
(rowselects,inversions,inversionsfull,inverses,nresults)=generatesinglestepinversion(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)
pass
else:
print("Generating flat inversion steps")
(rowselects,inversions,inversionsfull,inverses,nresults)=generateinversionsteps(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,tstars,ny,nx,trange,depths)
pass
return (rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions,rowselects,inversions,inversionsfull,inverses,nresults)
def saturationcheck(thermal_data,startframe,sat_threshold=0.9):
""" Determine the fraction of thermal_data that is saturated
on or after startframe (0 based). It is assumed the highest
temperature recorded in thermal_data for a particular pixel
is the saturation value and that the thermal data has already
been background-subtracted.
    A pixel is defined as saturated if it exceeds sat_threshold*max_for_that_pixel(thermal_data) in any frame on or after startframe.
Returns a tuple containing a number between 0 and 1 representing
the fraction of valid pixels (not identically 0, not infinite, not NaN)
that are saturated, followed by a saturation map
"""
saturation_levels = np.max(thermal_data,axis=0)
saturated = np.sum(thermal_data[startframe:,:,:] > sat_threshold*saturation_levels[np.newaxis,:,:],axis=0) > 0
valid = np.isfinite(saturation_levels) & (saturation_levels != 0.0)
fraction_saturated = np.count_nonzero(saturated)*1.0/np.count_nonzero(valid)
return (fraction_saturated,(saturated & valid))
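# Illustrative toy example (added commentary, not part of the original module): the criterion
# above flags a pixel if, on or after startframe, it comes within sat_threshold of the largest
# value that pixel ever reaches; all-zero (dead) pixels are excluded from the statistics.
def _saturationcheck_sketch():
    thermal = np.zeros((4, 1, 3), dtype='d')
    thermal[:, 0, 0] = (1.0, 10.0, 9.5, 8.0)   # exceeds 0.9*peak at frames 1 and 2 -> saturated
    thermal[:, 0, 1] = (10.0, 5.0, 3.0, 2.0)   # peaks before the start frame only -> not saturated
    (fraction, satmap) = saturationcheck(thermal, startframe=1)
    assert satmap[0, 0] and not satmap[0, 1] and not satmap[0, 2]
    assert fraction == 0.5                     # 1 saturated pixel out of 2 valid pixels
    return (fraction, satmap)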
def num_sources(y,x,y_bnd,x_bnd,source_approx_dy,source_approx_dx):
# num_sources_y should approximately match
# (y_bnd[-1]-y_bnd[0])/source_approx_dy, BUT
# ... it should be an integer AND
# ... not larger than y.shape[0] i.e. at least 1 pixel/source
num_sources_y = int(round((y_bnd[-1]-y_bnd[0])/source_approx_dy))
if num_sources_y > y.shape[0]:
num_sources_y=y.shape[0]
pass
# num_sources_x should approximately match
# (x_bnd[-1]-x_bnd[0])/source_approx_dx, BUT
# ... it should be an integer AND
# ... not larger than x.shape[0] i.e. at least 1 pixel/source
num_sources_x = int(round((x_bnd[-1]-x_bnd[0])/source_approx_dx))
if num_sources_x > x.shape[0]:
num_sources_x=x.shape[0]
pass
return (num_sources_y,num_sources_x)
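# Illustrative worked example (added commentary, not part of the original module), using made-up
# numbers: a 50 mm span with source_approx_dy = 10 mm rounds to 5 sources, but if the grid only
# has 3 pixels across that span the count is clamped to 3 (at least one pixel per source).
def _num_sources_sketch():
    y = np.linspace(0.0, 0.05, 3)     # only 3 pixels across the y span
    x = np.linspace(0.0, 0.05, 10)    # 10 pixels across the x span
    y_bnd = np.array([0.0, 0.05])
    x_bnd = np.array([0.0, 0.05])
    (num_sources_y, num_sources_x) = num_sources(y, x, y_bnd, x_bnd, 0.01, 0.01)
    assert (num_sources_y, num_sources_x) == (3, 5)
    return (num_sources_y, num_sources_x)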
def setupinversionprob(rho,c,alphaz,alphaxy,dy,dx,maxy,maxx,t0,dt,nt,reflectors,source_approx_dy=None,source_approx_dx=None,singlestep=False):
kx=alphaxy*rho*c
ky=alphaxy*rho*c
kz=alphaz*rho*c
trange=t0+np.arange(nt,dtype='d')*dt
greensconvolution_params=greensconvolution.greensconvolution_calc.read_greensconvolution()
gi_params=(rho,c,alphaz,alphaxy,dy,dx,maxy,maxx,t0,dt,nt,reflectors,
trange,greensconvolution_params)
gi_grid = build_gi_grid(dy,maxy,
dx,maxx)
(ny,nx,
y,x,
ygrid,xgrid,
y_bnd,x_bnd) = gi_grid
num_sources_y=2
num_sources_x=2
if source_approx_dy is not None or source_approx_dx is not None:
(num_sources_y,num_sources_x) = num_sources(y,x,y_bnd,x_bnd,source_approx_dy,source_approx_dx)
pass
#print("Building source vecs")
(rowscaling,flashsourcecolumnscaling,flashsourcevecs,reflectorcolumnscaling,reflectorsourcevecs,depths,tstars,conditions,prevconditions,prevscaledconditions,rowselects,inversions,inversionsfull,inverses,nresults)=define_flat_inversion(gi_params,gi_grid,num_sources_y,num_sources_x,singlestep=singlestep)
inversionprob=(kx,ky,kz,
ny,nx,
y,x,
ygrid,xgrid,
y_bnd,x_bnd,
num_sources_y,num_sources_x,
trange,
rowscaling,
flashsourcecolumnscaling,flashsourcevecs,
reflectorcolumnscaling,reflectorsourcevecs,
depths,tstars,
conditions,prevconditions,prevscaledconditions,
rowselects,inversions,inversionsfull,inverses,nresults)
return inversionprob
def perform_flat_inversion(rho,c,alphaz,alphaxy,y0,x0,dy,dx,tile_size_y,tile_size_x,xydownsample,reflectors,source_approx_dy,source_approx_dx,tikparam,data_t0,dt,flashframe,flashtime,frames_to_discard,frames_to_discard_prior,data,singlestep,parallelevaluate,OpenCL_Device_Type,OpenCL_Device_Name,numplotrows,numplotcols,plot_min_power_per_area,plot_max_power_per_area,nextfignum):
# Perform background subtraction
bkgnd_frames = flashframe-frames_to_discard_prior
background=np.mean(data[:bkgnd_frames,:,:],axis=0)
startframe = flashframe + frames_to_discard-1
data_timebase=data_t0+np.arange(data.shape[0],dtype='d')*dt
t0=data_timebase[startframe] - flashtime
diff = data-background[np.newaxis,:,:]
(saturation_fraction,saturation_map)=saturationcheck(diff,startframe)
if saturation_fraction > .2:
raise ValueError("TWIRAW_greensinversion: ERROR: %.1f%% of pixels are saturated at least once beyond start frame!" % (saturation_fraction*100.0))
if saturation_fraction > .02:
sys.stderr.write("TWIRAW_greensinversion: WARNING: %.1f%% of pixels are saturated at least once beyond start frame!\n" % (saturation_fraction*100.0))
pass
deepest_tstar = reflectors[0][0]**2.0/(np.pi*alphaz)
endframe = np.argmin(np.abs(data_timebase-data_timebase[flashframe]-flashtime-deepest_tstar*2.0)) # see also generateinversionsteps() call to timelimitmatrix()
nt=data_timebase[startframe:endframe].shape[0]
inversionprob = setupinversionprob(rho,c,alphaz,alphaxy,dy,dx,tile_size_y,tile_size_x,t0,dt,nt,reflectors,source_approx_dy=source_approx_dy,source_approx_dx=source_approx_dx,singlestep=singlestep)
(kx,ky,kz,
ny,nx,
y,x,
ygrid,xgrid,
y_bnd,x_bnd,
num_sources_y,num_sources_x,
trange,
rowscaling,
flashsourcecolumnscaling,flashsourcevecs,
reflectorcolumnscaling,reflectorsourcevecs,
depths,tstars,
conditions,prevconditions,prevscaledconditions,
rowselects,inversions,inversionsfull,inverses,nresults) = inversionprob
(minyminx_corners,yranges,xranges,contributionprofiles)=build_tiled_rectangle(ny,nx,dy,dx,reflectors,diff,xydownsample)
inputmats = [ diff[startframe:endframe,(yidx*xydownsample):((yidx+ny)*xydownsample):xydownsample,(xidx*xydownsample):((xidx+nx)*xydownsample):xydownsample] for (yidx,xidx) in minyminx_corners ]
print("Filling holes...")
inputmats_holesfilled = [ fillholes_flat(inputmat) for inputmat in inputmats ]
print("Done filling holes.")
if parallelevaluate:
inversionevalfunc=parallelperforminversionsteps
OpenCL_CTX=greensconvolution_params.get_opencl_context() #greensinversion.inversion.Get_OpenCL_Context()
pass
else:
inversionevalfunc=serialperforminversionsteps
OpenCL_CTX=None
pass
if nextfignum is not None:
from matplotlib import pyplot as pl
# tikparam diagnostic plots
pl.figure(nextfignum)
pl.clf()
for inversioncnt in range(len(inversions)):
pl.plot(inverses[inversioncnt][1])
pass
pl.xlabel('Singular value index')
pl.ylabel('Magnitude')
nextfignum+=1
pass
# evaluate inversion
(inversioncoeffs_list,errs_list,tikparams_list) = inversionevalfunc(OpenCL_CTX,
rowselects,
inversions,
inversionsfull,
inverses,
nresults,
inputmats_holesfilled,
tikparam)
# Generate concrete representation of inversion
fullinverse=np.zeros((len(reflectors)+1,diff.shape[1]//xydownsample,diff.shape[2]//xydownsample),dtype='d')
fullinverse_x_bnd=x0-dx*xydownsample/2.0 + np.arange(diff.shape[2]//xydownsample+1,dtype='d')*dx*xydownsample
fullinverse_y_bnd=y0-dy*xydownsample/2.0 + np.arange(diff.shape[1]//xydownsample+1,dtype='d')*dy*xydownsample
for tile_idx in range(len(minyminx_corners)):
(yidx,xidx)=minyminx_corners[tile_idx]
fullinverse[:,yidx:(yidx+ny),xidx:(xidx+nx)] += buildconcreteinverse(inversioncoeffs_list[tile_idx],reflectors,ygrid,xgrid,y_bnd,x_bnd,ny,nx,num_sources_y,num_sources_x)*contributionprofiles[tile_idx]
pass
# Plot concrete inverse as a bunch of subplots
if nextfignum is not None:
(fig,subplots,images)=plotconcreteinverse(nextfignum,numplotrows,numplotcols,saturation_map,fullinverse,reflectors,plot_min_power_per_area,plot_max_power_per_area,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x)
nextfignum+=1
pass
# Plot separate plots with concrete inverse
if nextfignum is not None:
(nextfignum,plots,images,plothrefs,depths) = plotconcreteinversemovie(nextfignum,None,None,saturation_map,fullinverse,reflectors,plot_min_power_per_area,plot_max_power_per_area,fullinverse_y_bnd,fullinverse_x_bnd,num_sources_y,num_sources_x,dpi=300)
pass
inversion_info=(minyminx_corners,
yranges,
xranges,
contributionprofiles,
inversioncoeffs_list,
errs_list,
tikparams_list,
fullinverse_y_bnd,
fullinverse_x_bnd)
return (inversionprob,saturation_map,
inversion_info,
fullinverse,
nextfignum)
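# Worked sketch (not in the original source) of the time-window choice above: the
# characteristic diffusion time of the deepest reflector is t* = z**2/(pi*alphaz),
# and frames are kept out to roughly 2*t* after the flash. All numbers are assumptions.
def _endframe_example():
    import numpy as np
    alphaz = 6.8e-7                  # assumed through-thickness diffusivity, m^2/s
    deepest_z = 2.0e-3               # assumed depth of the deepest reflector, m
    deepest_tstar = deepest_z**2.0 / (np.pi * alphaz)        # ~1.9 s
    dt, flashtime, flashframe = 0.033, 0.0, 30               # assumed frame timing
    data_timebase = np.arange(1000, dtype='d') * dt
    endframe = np.argmin(np.abs(data_timebase - data_timebase[flashframe]
                                - flashtime - deepest_tstar * 2.0))
    return deepest_tstar, endframe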
| {"hexsha": "89904b38b63e62d1830b645efed8af35adcb7426", "size": 52891, "ext": "py", "lang": "Python", "max_stars_repo_path": "greensinversion/inversion.py", "max_stars_repo_name": "isuthermography/greensinversion", "max_stars_repo_head_hexsha": "92f272a3649bb2f6b132f8cd239edd68dd2a6a62", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-25T23:23:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-25T23:23:04.000Z", "max_issues_repo_path": "greensinversion/inversion.py", "max_issues_repo_name": "isuthermography/greensinversion", "max_issues_repo_head_hexsha": "92f272a3649bb2f6b132f8cd239edd68dd2a6a62", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-10-04T01:43:25.000Z", "max_issues_repo_issues_event_max_datetime": "2018-11-28T17:59:12.000Z", "max_forks_repo_path": "greensinversion/inversion.py", "max_forks_repo_name": "isuthermography/greensinversion", "max_forks_repo_head_hexsha": "92f272a3649bb2f6b132f8cd239edd68dd2a6a62", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-25T23:23:06.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-25T23:23:06.000Z", "avg_line_length": 42.4145950281, "max_line_length": 529, "alphanum_fraction": 0.6396740466, "include": true, "reason": "import numpy", "num_tokens": 12751} |
import plotly
py = plotly.plotly("iancze", "0ttojbuvyj")
import StellarSpectra
from StellarSpectra import spectrum
from StellarSpectra import constants as C
from spectrum import DataSpectrum
import numpy as np
import sys
from astropy.io import ascii
myspec = DataSpectrum.open("/home/ian/Grad/Research/Disks/StellarSpectra/tests/WASP14/WASP-14_2009-06-15_04h13m57s_cb.spec.flux",
orders=np.array([22]))
# Shift the wavelengths so the radial velocity is as close to 0 as possible (i.e. toward the rest frame).
vz = -15
myspec.wls = myspec.wls * np.sqrt((C.c_kms + vz) / (C.c_kms - vz))
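# Small sketch (not part of the original script) of the factor applied above: for a
# line-of-sight velocity vz (km/s), wavelengths scale by sqrt((c+vz)/(c-vz)); with
# vz = -15 km/s this is ~0.99995, i.e. a slight blueshift toward the rest frame.
def doppler_factor(vz_kms):
    # C.c_kms (speed of light in km/s) comes from the StellarSpectra import above
    return np.sqrt((C.c_kms + vz_kms) / (C.c_kms - vz_kms))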
def return_line_labels(wl, tol=1):
'''Given a wl array, return for each wavelength a label string listing the known lines
within `tol` Angstroms of that point, sorted by proximity and clipped to at most six.'''
#for linelist_air.dat, col_starts=[3, 20], col_ends=[17, 28]
#for linelist_kurucz.dat, col_starts=[3, 13], col_ends=[10, 20]
lines = ascii.read("linelist_kurucz.dat", Reader=ascii.FixedWidth, col_starts=[3, 13], col_ends=[10, 20],
converters={'line': [ascii.convert_numpy(np.float)],
'element': [ascii.convert_numpy(np.str)]}, guess=False)
lines['line'] = 10 * lines['line'] #Convert from nanometers to AA
#truncate list to speed execution
ind = (lines['line'] >= np.min(wl) - tol) & (lines['line'] <= np.max(wl) + tol)
lines = lines[ind]
#for each wl, query all known lines that are within tol, add these to the set of known lines
line_labels = []
for w in wl:
#Find nearby wl lines within tol
ind = (w - tol <= lines['line']) & (lines['line'] <= w + tol)
#Truncated lines
lines_trunc = lines[ind]
#Sort them by closeness to current pixel
distances = np.abs(w - lines_trunc['line'])
distance_ind = np.argsort(distances)
#Sort lines by closest label
lines_sort = lines_trunc[distance_ind]
#Take only 6 lines
lines_clip = lines_sort[:6]
#Join the nearest labels into a single newline-separated string
labels = "\n".join(["{} {:.2f}".format(label,line) for line, label in lines_clip])
line_labels.append(labels)
return line_labels
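# Self-contained sketch (not from the original script) of the matching rule implemented
# above: keep the known lines within `tol` Angstroms of each pixel, ordered by proximity,
# at most six per pixel. The tiny line list here is invented purely for illustration.
def _label_matching_demo(tol=0.3):
    import numpy as np
    known = np.array([5167.32, 5172.68, 5183.60])   # hypothetical line wavelengths (AA)
    names = np.array(["Mg I", "Mg I", "Mg I"])
    labels = []
    for w in [5167.4, 5500.0]:                      # two example pixel wavelengths
        near = np.abs(known - w) <= tol
        order = np.argsort(np.abs(known[near] - w))[:6]
        labels.append("\n".join("{} {:.2f}".format(n, l)
                                for l, n in zip(known[near][order], names[near][order])))
    return labels                                   # -> ['Mg I 5167.32', '']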
line_list = return_line_labels(myspec.wls[0], tol=0.3)
data = {'name': 'WASP-14',
'x': myspec.wls[0],
'y': myspec.fls[0],
'text': line_list,
'type': 'scatter',
'mode': 'lines+markers'
}
layout = {
'xaxis': {'title': 'Wavelength (AA)'},
'yaxis': {'title': 'Flux (ergs/s/AA/cm^2)'},
'title': 'WASP-14'
}
response = py.plot(data, layout=layout, filename='Spectra/WASP-14', fileopt='overwrite', world_readable=True)
url = response['url']
| {"hexsha": "63ca23ea84bba11db92fce14d8690c242a5c9b59", "size": 2635, "ext": "py", "lang": "Python", "max_stars_repo_path": "attic/plotly_line_label.py", "max_stars_repo_name": "jason-neal/Starfish", "max_stars_repo_head_hexsha": "4ffa45e0190fb6f3262511d57d1a563e5ee711de", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-07-10T00:06:36.000Z", "max_stars_repo_stars_event_max_datetime": "2017-07-10T00:06:36.000Z", "max_issues_repo_path": "attic/plotly_line_label.py", "max_issues_repo_name": "jason-neal/Starfish", "max_issues_repo_head_hexsha": "4ffa45e0190fb6f3262511d57d1a563e5ee711de", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "attic/plotly_line_label.py", "max_forks_repo_name": "jason-neal/Starfish", "max_forks_repo_head_hexsha": "4ffa45e0190fb6f3262511d57d1a563e5ee711de", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2016-06-11T09:48:16.000Z", "max_forks_repo_forks_event_max_datetime": "2019-08-07T19:52:41.000Z", "avg_line_length": 32.1341463415, "max_line_length": 129, "alphanum_fraction": 0.6178368121, "include": true, "reason": "import numpy,from astropy", "num_tokens": 751} |
#include <boost/python/class.hpp>
#include <boost/python/enum.hpp>
#include <boost/python/args.hpp>
#include <boost/python/return_value_policy.hpp>
#include <boost/python/return_by_value.hpp>
#include <boost/python/return_internal_reference.hpp>
#include <scitbx/array_family/boost_python/shared_wrapper.h>
#include <cctbx/miller/amplitude_normalisation.h>
namespace cctbx { namespace miller { namespace boost_python {
template <typename FloatType>
struct amplitude_normalisation_wrapper
{
typedef amplitude_normalisation<FloatType> wt;
typedef typename wt::float_type float_type;
static void wrap() {
using namespace boost::python;
return_value_policy<return_by_value> rbv;
typedef return_internal_reference<> rir;
af::boost_python::shared_wrapper<
typename wt::form_factor_t, rir>::wrap(
"shared_gaussian_form_factors");
class_<wt>("amplitude_normalisation", no_init)
.def(init<af::const_ref<typename wt::form_factor_t> const &,
af::const_ref<float_type> const &,
float_type,
float_type,
uctbx::unit_cell const &,
sgtbx::space_group const &,
af::const_ref<index<> > const &>(
(arg("form_factors"),
arg("multiplicities"),
arg("wilson_intensity_scale_factor"),
arg("wilson_b"),
arg("unit_cell"),
arg("space_group"),
arg("indices"))))
.add_property("normalisations", make_getter(&wt::normalisations, rbv))
;
}
};
void wrap_amplitude_normalisation() {
amplitude_normalisation_wrapper<double>::wrap();
}
}}} // cctbx::miller::boost_python
| {"hexsha": "86f1078b1451a6770cf0e6c23692417b6f63687f", "size": 1758, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cctbx/miller/boost_python/amplitude_normalisation.cpp", "max_stars_repo_name": "rimmartin/cctbx_project", "max_stars_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 155.0, "max_stars_repo_stars_event_min_datetime": "2016-11-23T12:52:16.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T15:35:44.000Z", "max_issues_repo_path": "cctbx/miller/boost_python/amplitude_normalisation.cpp", "max_issues_repo_name": "rimmartin/cctbx_project", "max_issues_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 590.0, "max_issues_repo_issues_event_min_datetime": "2016-12-10T11:31:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-30T23:10:09.000Z", "max_forks_repo_path": "cctbx/miller/boost_python/amplitude_normalisation.cpp", "max_forks_repo_name": "rimmartin/cctbx_project", "max_forks_repo_head_hexsha": "644090f9432d9afc22cfb542fc3ab78ca8e15e5d", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 115.0, "max_forks_repo_forks_event_min_datetime": "2016-11-15T08:17:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T15:30:14.000Z", "avg_line_length": 32.5555555556, "max_line_length": 78, "alphanum_fraction": 0.6439135381, "num_tokens": 381} |
from torch_rl.training.core import HorizonTrainer, mse_loss
from torch_rl.memory import GeneralisedMemory
from torch.optim import Adam
from torch_rl.utils import to_tensor as tt
import torch as tor
from collections import deque
from torch_rl.utils import prGreen
import time
import sys
from torch_rl.utils import *
import numpy as np
def queue_to_array(q):
q.put(False)
arr = []
while True:
item = q.get()
if item:
arr.append(item)
else:
break
return np.asarray(arr)
class AdvantageEstimator(object):
def __init__(self, env, policy_network, critic_network, nsteps, gamma, lam, replay_memory, hindsight_points=None):
self.env = env
self.policy_network = policy_network
self.critic_network = critic_network
nenv = 1
self.obs = env.reset()
self.gamma = gamma
self.lam = lam
self.nsteps = nsteps
self.state = None
self.done = False
self.global_step = 0
self.episodes = 0
self.replay_memory = replay_memory
def run(self):
mb_obs, mb_rewards, mb_actions, mb_values, mb_dones, mb_logpacs = [], [], [], [], [], []
mb_state = self.state
epinfos = []
self.critic_network.cpu()
#
for _ in range(self.nsteps):
actions, values = self.policy_network(tt(self.obs, cuda=False).view(1,-1))
logpacs = self.policy_network.logprob(actions)
mb_obs.append(self.obs.copy().flatten())
mb_actions.append(actions.data.numpy().flatten())
mb_values.append(values.detach().data.numpy().flatten())
mb_logpacs.append(logpacs.data.numpy().flatten())
mb_dones.append(self.done)
a = actions.data.numpy().flatten()
obs, reward, self.done, infos = self.env.step(a)
q = self.critic_network(tt(self.obs.reshape(1,-1), cuda=False), actions)
#Additional step in comparison to PPO
self.replay_memory.append(obs, a, reward, self.done, extra_info=np.hstack((mb_logpacs[-1],q.cpu().data.numpy().flatten())))
self.obs = obs
self.global_step += 1
mb_rewards.append(reward)
if self.done:
self.episodes+=1
logger.logkv("episodes", self.episodes)
self.obs = self.env.reset()
self.critic_network.cuda()
# batch of steps to batch of rollouts
mb_obs = np.asarray(mb_obs, dtype=np.float32).reshape(self.nsteps, -1)
mb_rewards = np.asarray(mb_rewards, dtype=np.float32).reshape(self.nsteps, -1)
mb_actions = np.asarray(mb_actions, dtype=np.float32).reshape(self.nsteps,self.env.action_space.shape[0])
mb_values = np.asarray(mb_values, dtype=np.float32).reshape(self.nsteps, -1)
mb_logpacs = np.asarray(mb_logpacs, dtype=np.float32).reshape(self.nsteps, self.env.action_space.shape[0])
mb_dones = np.asarray(mb_dones, dtype=np.bool).reshape(self.nsteps, -1)
action, last_values = self.policy_network(tt(self.obs.reshape(1,-1), cuda=False))
action, last_values = action.data.numpy().reshape(-1), last_values.data.numpy().reshape(-1)
# discount/bootstrap off value fn
mb_returns = np.zeros_like(mb_rewards)
mb_advs = np.zeros_like(mb_rewards)
lastgaelam = 0
for t in reversed(range(self.nsteps)):
if t == self.nsteps - 1:
nextnonterminal = 1.0 - self.done
nextvalues = last_values
else:
nextnonterminal = 1.0 - mb_dones[t + 1]
nextvalues = mb_values[t + 1]
delta = mb_rewards[t] + self.gamma * nextvalues * nextnonterminal - mb_values[t]
mb_advs[t] = lastgaelam = delta + self.gamma * self.lam * nextnonterminal * lastgaelam
mb_returns = mb_advs + mb_values
return mb_obs, mb_returns, mb_dones, mb_actions, mb_values, mb_logpacs, mb_state
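# Minimal standalone sketch (not part of the class) of the backward recursion above:
# generalised advantage estimation with delta_t = r_t + gamma*V_{t+1}*(1-done) - V_t and
# A_t = delta_t + gamma*lam*(1-done)*A_{t+1}. Inputs are plain 1-D arrays.
def gae(rewards, values, dones, last_value, last_done, gamma=0.99, lam=0.95):
    import numpy as np
    advs = np.zeros_like(rewards, dtype='d')
    lastgaelam = 0.0
    n = len(rewards)
    for t in reversed(range(n)):
        if t == n - 1:
            nextnonterminal = 1.0 - last_done
            nextvalues = last_value
        else:
            nextnonterminal = 1.0 - dones[t + 1]
            nextvalues = values[t + 1]
        delta = rewards[t] + gamma * nextvalues * nextnonterminal - values[t]
        advs[t] = lastgaelam = delta + gamma * lam * nextnonterminal * lastgaelam
    return advs, advs + values   # advantages and the bootstrapped returns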
# obs, returns, masks, actions, values, neglogpacs, states = runner.run()
def constfn(val):
def f(_):
return val
return f
import copy
from torch_rl.memory import GeneralisedHindsightMemory
class HERIPGGPUPPOTrainer(HorizonTrainer):
"""
Implementation of interpolated policy gradient for PPO
"""
mvavg_reward = deque(maxlen=100)
def __init__(self, env, policy_network, critic_network, replay_memory, max_episode_len=500, gamma=.99,
lr=3e-4, n_steps=40, epsilon=0.2, optimizer=None, lmda=0.95, ent_coef=0., n_update_steps=10,
n_minibatches=1, v=0.5, tau=1e-3):
super(HERIPGGPUPPOTrainer, self).__init__(env)
self.n_minibatches = n_minibatches
self.lr = lr
# Replay memory for calculating the online policy gradient
self.replay_memory = replay_memory
self.max_episode_len = max_episode_len
self.epsilon = epsilon
self.gamma = gamma
self.lmda = lmda
self.optimizer = Adam(policy_network.parameters(), lr=lr, weight_decay=0.001) if optimizer is None else optimizer
self.goal_based = hasattr(env, "goal")
self.policy_network = policy_network
self.ent_coef = ent_coef
self.n_update_steps = n_update_steps
self.n_steps = n_steps
self.advantage_estimator = AdvantageEstimator(env, self.policy_network, critic_network, n_steps, self.gamma, self.lmda, self.replay_memory)
self.v = v
self.tau = tau
self.critic_network = critic_network
self.target_critic_network = cuda_if_available(copy.deepcopy(self.critic_network))
self.target_policy_network = cuda_if_available(copy.deepcopy(self.policy_network))
self.critic_optimizer = Adam(critic_network.parameters(), lr=3e-4, weight_decay=0.001)
def _off_policy_loss(self, batch_size):
s1, a1, r, s2, terminal, goal, add_info = self.replay_memory.sample_and_split(batch_size)
s1 = tt(s1).cuda()
a1 = tt(a1).cuda()
r = tt(r).cuda()
s2 = tt(s2).cuda()
oldlogpac = tt(add_info[:, :-1])
oldq = tt(add_info[:, -1])
#import pdb; pdb.set_trace()
a2, v_pred = self.target_policy_network(s2)
# Take deterministic step by taking the mean of the distribution
a2 = self.target_policy_network.mu()
q = self.critic_network(s1, a1)
#q_clipped = oldq + tor.clamp(q - oldq, -self.epsilon, self.epsilon)
q_target = r + self.gamma*(self.target_critic_network(s2,a2))
critloss1 = (q_target - q)**2
# critloss2 = (q_target - q_clipped)**2
# critloss = .5 * tor.mean(tor.max(critloss1, critloss2))
critloss = .5 * tor.mean(critloss1)
a, v = self.policy_network(s1)
a = self.policy_network.mu()
ratio = tor.exp(self.policy_network.logprob(a) - oldlogpac)
qestimate = self.critic_network(s1, a)
#pgloss1 = -qestimate * ratio
#pgloss2 = -qestimate * tor.clamp(ratio, 1. - self.epsilon, 1. + self.epsilon)
#pgloss = -tor.mean(tor.max(pgloss1, pgloss2))
pgloss = -tor.mean(qestimate)
mean_q_estimate = tor.mean(qestimate)
mean_ratio = tor.mean(ratio)
logger.logkv("erpgloss", pgloss.cpu().data.numpy())
logger.logkv("qloss", critloss.cpu().data.numpy())
logger.logkv("meanq", mean_q_estimate.cpu().data.numpy())
logger.logkv("ratio", mean_ratio.cpu().data.numpy())
logger.logkv("reward_mean", r.cpu().data.numpy().mean())
return pgloss + critloss
def _ppo_loss(self, bobs, bactions, badvs, breturns, blogpacs, bvalues):
OBS = tt(bobs)
A = tt(bactions)
ADV = tt(badvs)
R = tt(breturns)
OLDLOGPAC = tt(blogpacs)
OLDVPRED = tt(bvalues)
self.policy_network(OBS)
logpac = self.policy_network.logprob(A)
entropy = tor.mean(self.policy_network.entropy())
#### Value function loss ####
#print(bobs)
actions_new, v_pred = self.policy_network(tt(bobs))
v_pred_clipped = OLDVPRED + tor.clamp(v_pred - OLDVPRED, -self.epsilon, self.epsilon)
v_loss1 = (v_pred - R)**2
v_loss2 = (v_pred_clipped - R)**2
v_loss = .5 * tor.mean(tor.max(v_loss1, v_loss2))
### Ratio calculation ####
# In the OpenAI baselines implementation these are negative log-probabilities (neglogpacs), so the sign is flipped there
ratio = tor.exp(logpac - OLDLOGPAC)
### Policy gradient calculation ###
pg_loss1 = -ADV * ratio
pg_loss2 = -ADV * tor.clamp(ratio, 1. - self.epsilon, 1. + self.epsilon)
pg_loss = tor.mean(tor.max(pg_loss1, pg_loss2))
approxkl = .5 * tor.mean((logpac - OLDLOGPAC)**2)
ppo_loss = v_loss + pg_loss + self.ent_coef*entropy
logger.logkv("siglog", self.policy_network.siglog.cpu().data.numpy()[0])
logger.logkv("pgloss", pg_loss.cpu().data.numpy())
logger.logkv("vfloss", v_loss.cpu().data.numpy())
logger.logkv("vfloss", v_loss.cpu().data.numpy())
logger.logkv("approxkl", approxkl.cpu().data.numpy())
logger.logkv("pentropy", entropy.cpu().data.numpy())
return ppo_loss
def _horizon_step(self):
obs, returns, masks, actions, values, logpacs, states = self.advantage_estimator.run() #pylint: disable=E0632
# Normalize advantages over episodes
advs = returns - values
prev_ind = 0
for ind in np.argwhere(masks == True)[:, 0]:
episode_advs = advs[prev_ind:ind+1]
advs[prev_ind:ind+1] = (episode_advs - episode_advs.mean())/(episode_advs.std() + 1e-8)
prev_ind = ind+1
episode_advs = advs[prev_ind:-1]
advs[prev_ind:-1] = (episode_advs - episode_advs.mean())/(episode_advs.std() + 1e-8)
nbatch_train = self.n_steps // self.n_minibatches
self.policy_network.cuda()
#self.optimizer = Adam(self.policy_network.parameters(), lr=self.lr)
if states is None: # nonrecurrent version
inds = np.arange(self.n_steps)
for _ in range(self.n_update_steps):
np.random.shuffle(inds)
for start in range(0, self.n_steps, nbatch_train):
end = start + nbatch_train
mbinds = inds[start:end]
bobs, breturns, bmasks, bactions, bvalues, blogpacs, badvs = map(lambda arr: arr[mbinds], (obs, returns,
masks, actions, values, logpacs, advs))
# This introduces bias since the advantages can be normalized over more episodes
#advs = (advs - advs.mean()) / (advs.std() + 1e-8)
ppo_loss = self._ppo_loss(bobs, bactions, badvs, breturns, blogpacs, bvalues)
off_loss = self._off_policy_loss(nbatch_train)
loss = self.v*ppo_loss + (1-self.v) * off_loss
self.optimizer.zero_grad()
self.critic_optimizer.zero_grad()
loss.backward()
self.optimizer.step()
self.critic_optimizer.step()
# Soft updates for target policies and critic
# Soft updates of critic don't help
soft_update(self.target_policy_network, self.policy_network, self.tau)
soft_update(self.target_critic_network, self.critic_network, self.tau)
#Push to CPU
self.policy_network.cpu()
logger.dumpkvs()
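# Minimal sketch (not from torch_rl) of the two ingredients combined in _horizon_step:
# (1) the interpolated objective, loss = v * L_ppo + (1 - v) * L_off_policy, and
# (2) soft (Polyak) target updates, theta_target <- tau*theta + (1-tau)*theta_target,
# which is the assumed behaviour of the soft_update() helper imported from torch_rl.utils.
def interpolated_loss(ppo_loss, off_policy_loss, v=0.5):
    return v * ppo_loss + (1.0 - v) * off_policy_loss

def polyak_update(target_net, source_net, tau=1e-3):
    # both arguments are assumed to be torch.nn.Module instances
    for tp, sp in zip(target_net.parameters(), source_net.parameters()):
        tp.data.copy_(tau * sp.data + (1.0 - tau) * tp.data)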
if __name__ == '__main__':
from torch_rl.envs.wrappers import *
import gym
from gym.wrappers import Monitor
from torch_rl.models.ppo import ActorCriticPPO
from torch_rl.utils import *
from torch_rl.utils import logger
from torch_rl.envs import EnvLogger
import sys
logger.configure(clear=False)
monitor = Monitor(EnvLogger(NormalisedActionsWrapper(gym.make("Pendulum-v0"))), directory="./stats", force=True,
video_callable=False, write_upon_reset=True)
env = RunningMeanStdNormalize(monitor)
print(env.observation_space.shape)
with tor.cuda.device(1):
policy_network = ActorCriticPPO([env.observation_space.shape[0], 64, 64, env.action_space.shape[0]])
policy_network.apply(gauss_init(0, np.sqrt(2)))
trainer = HERIPGGPUPPOTrainer(policy_network=policy_network, env=env, n_update_steps=4, n_steps=40)  # NOTE: this trainer also requires critic_network and replay_memory arguments
trainer.train(horizon=100000, max_episode_len=500)
| {"hexsha": "fe23bbb1a2f7e5bf658c6f210280092a7bbb5ca7", "size": 12599, "ext": "py", "lang": "Python", "max_stars_repo_path": "torch_rl/training/her_ipgppo.py", "max_stars_repo_name": "JimmyMVP/plain_rl", "max_stars_repo_head_hexsha": "4780f05fffb62533a339197b49de487cdc9d9954", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2018-03-18T21:27:59.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-13T20:15:05.000Z", "max_issues_repo_path": "torch_rl/training/her_ipgppo.py", "max_issues_repo_name": "JimmyMVP/plain_rl", "max_issues_repo_head_hexsha": "4780f05fffb62533a339197b49de487cdc9d9954", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-03-19T09:51:04.000Z", "max_issues_repo_issues_event_max_datetime": "2018-03-19T10:11:02.000Z", "max_forks_repo_path": "torch_rl/training/her_ipgppo.py", "max_forks_repo_name": "JimmyMVP/plain_rl", "max_forks_repo_head_hexsha": "4780f05fffb62533a339197b49de487cdc9d9954", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2018-03-10T09:17:10.000Z", "max_forks_repo_forks_event_max_datetime": "2019-12-17T20:19:56.000Z", "avg_line_length": 36.4132947977, "max_line_length": 147, "alphanum_fraction": 0.6210810382, "include": true, "reason": "import numpy", "num_tokens": 3137} |
section \<open>Preliminaries\<close>
theory Prelim
imports
"Fresh_Identifiers.Fresh_String"
"Bounded_Deducibility_Security.Trivia"
begin
subsection \<open>The basic types\<close>
(* This version of string is needed for code generation: *)
definition "emptyStr = STR ''''"
(* The users of the system: *)
datatype name = Nam String.literal
definition "emptyName \<equiv> Nam emptyStr"
datatype inform = Info String.literal
definition "emptyInfo \<equiv> Info emptyStr"
datatype user = Usr name inform
fun nameUser where "nameUser (Usr name info) = name"
fun infoUser where "infoUser (Usr name info) = info"
definition "emptyUser \<equiv> Usr emptyName emptyInfo"
typedecl raw_data
code_printing type_constructor raw_data \<rightharpoonup> (Scala) "java.io.File"
(* Images (currently, pdf, to be changed): *)
datatype img = emptyImg | Imag raw_data
(* Visibility outside the current api: either friends-only or public
(i.e., exportable outside to the other apis): *)
datatype vis = Vsb String.literal
(* Accepted values: friend and public *)
abbreviation "FriendV \<equiv> Vsb (STR ''friend'')"
(* abbreviation "InternalV \<equiv> Vsb (STR ''internal'')" *)
abbreviation "PublicV \<equiv> Vsb (STR ''public'')"
fun stringOfVis where "stringOfVis (Vsb str) = str"
(* A post consists of a string for title, a string for its text,
a (possibly empty) image and a visibility specification: *)
datatype title = Tit String.literal
definition "emptyTitle \<equiv> Tit emptyStr"
datatype "text" = Txt String.literal
definition "emptyText \<equiv> Txt emptyStr"
datatype post = Pst title "text" img (* vis *)
(* Getters: *)
fun titlePost where "titlePost (Pst title text img) = title"
fun textPost where "textPost (Pst title text img) = text"
fun imgPost where "imgPost (Pst title text img) = img"
(* fun visPost where "visPost (Pst title text img vis) = vis" *)
(* Setters: *)
fun setTitlePost where "setTitlePost (Pst title text img) title' = Pst title' text img"
fun setTextPost where "setTextPost(Pst title text img) text' = Pst title text' img"
fun setImgPost where "setImgPost (Pst title text img) img' = Pst title text img'"
(* fun setVisPost where "setVisPost (Pst title text img vis) vis' = Pst title text img vis'" *)
(* *)
definition emptyPost :: post where
"emptyPost \<equiv> Pst emptyTitle emptyText emptyImg" (* FriendV" *)
(* initially set to the lowest visibility: friend *)
lemma titlePost_emptyPost[simp]: "titlePost emptyPost = emptyTitle"
and textPost_emptyPost[simp]: "textPost emptyPost = emptyText"
and imgPost_emptyPost[simp]: "imgPost emptyPost = emptyImg"
(* and visPost_emptyPost[simp]: "visPost emptyPost = FriendV" *)
unfolding emptyPost_def by simp_all
lemma set_get_post[simp]:
"titlePost (setTitlePost ntc title) = title"
"titlePost (setTextPost ntc text) = titlePost ntc"
"titlePost (setImgPost ntc img) = titlePost ntc"
(* "titlePost (setVisPost ntc vis) = titlePost ntc" *)
(* *)
"textPost (setTitlePost ntc title) = textPost ntc"
"textPost (setTextPost ntc text) = text"
"textPost (setImgPost ntc img) = textPost ntc"
(* "textPost (setVisPost ntc vis) = textPost ntc" *)
(* *)
"imgPost (setTitlePost ntc title) = imgPost ntc"
"imgPost (setTextPost ntc text) = imgPost ntc"
"imgPost (setImgPost ntc img) = img"
(* "imgPost (setVisPost ntc vis) = imgPost ntc" *)
(* *)
(*
"visPost (setTitlePost ntc title) = visPost ntc"
"visPost (setTextPost ntc text) = visPost ntc"
"visPost (setImgPost ntc img) = visPost ntc"
"visPost (setVisPost ntc vis) = vis"
*)
(* *)
by(cases ntc, auto)+
lemma setTextPost_absorb[simp]:
"setTitlePost (setTitlePost pst tit) tit1 = setTitlePost pst tit1"
"setTextPost (setTextPost pst txt) txt1 = setTextPost pst txt1"
"setImgPost (setImgPost pst img) img1 = setImgPost pst img1"
(* "setVisPost (setVisPost pst vis) vis1 = setVisPost pst vis1" *)
by (cases pst, auto)+
datatype password = Psw String.literal
definition "emptyPass \<equiv> Psw emptyStr"
datatype salt = Slt String.literal
definition "emptySalt \<equiv> Slt emptyStr"
(* Information associated with requests for registration: both for users and APIs *)
datatype requestInfo = ReqInfo String.literal
definition "emptyRequestInfo \<equiv> ReqInfo emptyStr"
subsection \<open>Identifiers\<close>
datatype apiID = Aid String.literal
datatype userID = Uid String.literal
datatype postID = Pid String.literal
definition "emptyApiID \<equiv> Aid emptyStr"
definition "emptyUserID \<equiv> Uid emptyStr"
definition "emptyPostID \<equiv> Pid emptyStr"
(* *)
fun apiIDAsStr where "apiIDAsStr (Aid str) = str"
definition "getFreshApiID apiIDs \<equiv> Aid (fresh (set (map apiIDAsStr apiIDs)) (STR ''1''))"
lemma ApiID_apiIDAsStr[simp]: "Aid (apiIDAsStr apiID) = apiID"
by (cases apiID) auto
lemma member_apiIDAsStr_iff[simp]: "str \<in> apiIDAsStr ` apiIDs \<longleftrightarrow> Aid str \<in> apiIDs"
by (metis ApiID_apiIDAsStr image_iff apiIDAsStr.simps)
lemma getFreshApiID: "\<not> getFreshApiID apiIDs \<in>\<in> apiIDs"
using fresh_notIn[of "set (map apiIDAsStr apiIDs)"] unfolding getFreshApiID_def by auto
(* *)
fun userIDAsStr where "userIDAsStr (Uid str) = str"
definition "getFreshUserID userIDs \<equiv> Uid (fresh (set (map userIDAsStr userIDs)) (STR ''2''))"
lemma UserID_userIDAsStr[simp]: "Uid (userIDAsStr userID) = userID"
by (cases userID) auto
lemma member_userIDAsStr_iff[simp]: "str \<in> userIDAsStr ` (set userIDs) \<longleftrightarrow> Uid str \<in>\<in> userIDs"
by (metis UserID_userIDAsStr image_iff userIDAsStr.simps)
lemma getFreshUserID: "\<not> getFreshUserID userIDs \<in>\<in> userIDs"
using fresh_notIn[of "set (map userIDAsStr userIDs)"] unfolding getFreshUserID_def by auto
(* *)
fun postIDAsStr where "postIDAsStr (Pid str) = str"
definition "getFreshPostID postIDs \<equiv> Pid (fresh (set (map postIDAsStr postIDs)) (STR ''3''))"
lemma PostID_postIDAsStr[simp]: "Pid (postIDAsStr postID) = postID"
by (cases postID) auto
lemma member_postIDAsStr_iff[simp]: "str \<in> postIDAsStr ` (set postIDs) \<longleftrightarrow> Pid str \<in>\<in> postIDs"
by (metis PostID_postIDAsStr image_iff postIDAsStr.simps)
lemma getFreshPostID: "\<not> getFreshPostID postIDs \<in>\<in> postIDs"
using fresh_notIn[of "set (map postIDAsStr postIDs)"] unfolding getFreshPostID_def by auto
end
| {"author": "isabelle-prover", "repo": "mirror-afp-devel", "sha": "c84055551f07621736c3eb6a1ef4fb7e8cc57dd1", "save_path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel", "path": "github-repos/isabelle/isabelle-prover-mirror-afp-devel/mirror-afp-devel-c84055551f07621736c3eb6a1ef4fb7e8cc57dd1/thys/CoSMeDis/Prelim.thy"} |
# Created by Qingzhi Ma at 2019-07-23
# All right reserved
# Department of Computer Science
# the University of Warwick
# Q.Ma.2@warwick.ac.uk
import os
import os.path
import warnings
from datetime import datetime
from multiprocessing import set_start_method as set_start_method_cpu
import dill
import numpy as np
import torch
from torch.multiprocessing import set_start_method as set_start_method_torch
from dbestclient.catalog.catalog import DBEstModelCatalog
from dbestclient.executor.queryengine import QueryEngine
from dbestclient.executor.queryenginemdn import (
MdnQueryEngine, MdnQueryEngineGoGs, MdnQueryEngineXCategorical,
MdnQueryEngineXCategoricalOneModel)
from dbestclient.io.sampling import DBEstSampling
from dbestclient.ml.modeltrainer import (GroupByModelTrainer, KdeModelTrainer,
SimpleModelTrainer)
from dbestclient.ml.modelwraper import (GroupByModelWrapper,
get_pickle_file_name)
from dbestclient.parser.parser import DBEstParser
from dbestclient.tools.dftools import (get_group_count_from_df,
get_group_count_from_summary_file,
get_group_count_from_table)
from dbestclient.tools.running_parameters import RUNTIME_CONF, DbestConfig
from dbestclient.tools.variables import Slave, UseCols
class SqlExecutor:
"""
This is the executor for the SQL query.
"""
def __init__(self):
self.parser = None
self.config = DbestConfig() # model-related configuration
self.runtime_config = RUNTIME_CONF
self.last_config = None
self.model_catalog = DBEstModelCatalog()
self.init_slaves()
self.init_model_catalog()
self.save_sample = False
# self.table_header = None
self.n_total_records = None
self.use_kde = True
def init_model_catalog(self):
# search the warehouse, and add all available models.
n_model = 0
t1 = datetime.now()
for file_name in os.listdir(self.config.get_config()['warehousedir']):
# load simple models
if file_name.endswith(self.runtime_config["model_suffix"]):
if n_model == 0:
print("start loading pre-existing models.")
with open(self.config.get_config()['warehousedir'] + "/" + file_name, 'rb') as f:
model = dill.load(f)
self.model_catalog.model_catalog[model.init_pickle_file_name(
self.runtime_config)] = model
n_model += 1
# # load group by models
# if os.path.isdir(self.config.get_config()['warehousedir'] + "/" + file_name):
# n_models_in_groupby = 0
# if n_model == 0:
# print("start loading pre-existing models.")
# for model_name in os.listdir(self.config.get_config()['warehousedir'] + "/" + file_name):
# if model_name.endswith(self.runtime_config["model_suffix"]):
# with open(self.config.get_config()['warehousedir'] + "/" + file_name + "/" + model_name, 'rb') as f:
# model = dill.load(f)
# n_models_in_groupby += 1
# if n_models_in_groupby == 1:
# groupby_model_wrapper = GroupByModelWrapper(model.mdl, model.tbl, model.x, model.y,
# model.groupby_attribute,
# x_min_value=model.x_min_value,
# x_max_value=model.x_max_value)
# groupby_model_wrapper.add_simple_model(model)
# self.model_catalog.model_catalog[file_name] = groupby_model_wrapper.models
# n_model += 1
if n_model > 0:
print("Loaded " + str(n_model) + " models.", end=" ")
if self.runtime_config["b_show_latency"]:
t2 = datetime.now()
print("time cost ", (t2-t1).total_seconds(), "s")
else:
print()
def init_slaves(self):
file_name = os.path.join(self.config.config["warehousedir"], "slaves")
if os.path.exists(file_name) and os.path.getsize(file_name) > 0:
with open(file_name, "r") as f:
for line in f:
if "#" not in line:
self.runtime_config["slaves"].add(Slave(line))
if self.runtime_config['v']:
print("Cluster mode is on, slaves are " +
self.runtime_config["slaves"].to_string())
else:
if self.runtime_config['v']:
print("Local mode is on, as no slaves are provided.")
def execute(self, sql):
# b_use_gg=False, n_per_gg=10, result2file=None,n_mdn_layer_node = 10, encoding = "onehot",n_jobs = 4, b_grid_search = True,device = "cpu", n_division = 20
# prepare the parser
if type(sql) == str:
self.parser = DBEstParser()
self.parser.parse(sql)
elif type(sql) == DBEstParser:
self.parser = sql
else:
print("Unrecognized SQL! Please check it!")
exit(-1)
# execute the query
if self.parser.if_nested_query():
warnings.warn("Nested query is currently not supported!")
else:
sql_type = self.parser.get_query_type()
if sql_type == "create": # process create query
# initialize the configure for each model creation.
if self.last_config:
self.config = self.last_config
else:
self.config = DbestConfig()
# DDL, create the model as requested
mdl = self.parser.get_ddl_model_name()
tbl = self.parser.get_from_name()
if self.parser.if_model_need_filter():
self.config.set_parameter("accept_filter", True)
# remove the unnecessary quote character '
tbl = tbl.replace("'", "")
if os.path.isfile(tbl): # the absolute path is provided
original_data_file = tbl
else: # the file is in the warehouse direcotry
original_data_file = self.config.get_config()[
'warehousedir'] + "/" + tbl
yheader = self.parser.get_y()
xheader_continous, xheader_categorical = self.parser.get_x()
ratio = self.parser.get_sampling_ratio()
method = self.parser.get_sampling_method()
table_header = self.config.get_config()['table_header']
# print("table_header", table_header)
if table_header is not None:
table_header = table_header.split(
self.config.get_config()['csv_split_char'])
# make samples
if not self.parser.if_contain_groupby(): # if group by is not involved
sampler = DBEstSampling(
headers=table_header, usecols={"y": yheader, "x_continous": xheader_continous, "x_categorical": xheader_categorical, "gb": None})
else:
groupby_attribute = self.parser.get_groupby_value()
sampler = DBEstSampling(headers=table_header, usecols={
"y": yheader, "x_continous": xheader_continous, "x_categorical": xheader_categorical, "gb": groupby_attribute})
# print(self.config)
if os.path.exists(os.path.join(self.config.get_config()['warehousedir'], mdl + self.runtime_config["model_suffix"])):
print(
"Model {0} exists in the warehouse, please use"
" another model name to train it.".format(mdl))
return
# if self.parser.if_contain_groupby():
# groupby_attribute = self.parser.get_groupby_value()
# if os.path.exists(self.config['warehousedir'] + "/" + mdl + "_groupby_" + groupby_attribute):
# print(
# "Model {0} exists in the warehouse, please use"
# " another model name to train it.".format(mdl))
# return
print("Start creating model " + mdl)
time1 = datetime.now()
if self.save_sample:
sampler.make_sample(
original_data_file, ratio, method, split_char=self.config.get_config()[
'csv_split_char'],
file2save=self.config.get_config()['warehousedir'] +
"/" + mdl + '.csv',
num_total_records=self.n_total_records)
else:
sampler.make_sample(
original_data_file, ratio, method, split_char=self.config.get_config()[
'csv_split_char'],
num_total_records=self.n_total_records)
# set the n_total_point and scaling factor for each model.
# self.config.set_parameter(
# "n_total_point", sampler.n_total_point)
# self.config.set_parameter(
# "scaling_factor", sampler.scaling_factor)
# print("scaling_factor is ", sampler.scaling_factor)
if not self.parser.if_contain_groupby(): # if group by is not involved
# n_total_point = sampler.n_total_point
# xys = sampler.getyx(yheader, xheader_continous)
# simple_model_wrapper = SimpleModelTrainer(mdl, tbl, xheader_continous, yheader,
# n_total_point, ratio, config=self.config.copy()).fit_from_df(
# xys, self.runtime_config)
# reg = simple_model_wrapper.reg
# density = simple_model_wrapper.density
# n_sample_point = int(simple_model_wrapper.n_sample_point)
# n_total_point = int(simple_model_wrapper.n_total_point)
# x_min_value = float(simple_model_wrapper.x_min_value)
# x_max_value = float(simple_model_wrapper.x_max_value)
# query_engine = QueryEngine(mdl, reg, density, n_sample_point,
# n_total_point, x_min_value, x_max_value, xheader_continous[
# 0],
# self.config)
sampler.sample.sampledf["dummy_gb"] = "dummy"
sampler.sample.usecols = {"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": "dummy_gb"}
n_total_point, xys = sampler.get_groupby_frequency_data()
# if not n_total_point['if_contain_x_categorical']:
n_total_point.pop("if_contain_x_categorical")
kdeModelWrapper = KdeModelTrainer(
mdl, tbl, xheader_continous[0], yheader,
groupby_attribute=["dummy_gb"],
groupby_values=list(
n_total_point.keys()),
n_total_point=n_total_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys["data"], self.runtime_config, network_size="large")
qe_mdn = MdnQueryEngine(
kdeModelWrapper, config=self.config.copy())
qe_mdn.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe_mdn, self.runtime_config)
else: # if group by is involved in the query
if self.config.get_config()['reg_type'] == "qreg":
xys = sampler.getyx(yheader, xheader_continous)
n_total_point = get_group_count_from_table(
original_data_file, groupby_attribute, sep=self.config.get_config()[
'csv_split_char'],
headers=table_header)
n_sample_point = get_group_count_from_df(
xys, groupby_attribute)
groupby_model_wrapper = GroupByModelTrainer(mdl, tbl, xheader_continous, yheader, groupby_attribute,
n_total_point, n_sample_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys, self.runtime_config)
groupby_model_wrapper.serialize2warehouse(
self.config.get_config()['warehousedir'] + "/" + groupby_model_wrapper.dir)
self.model_catalog.model_catalog[groupby_model_wrapper.dir] = groupby_model_wrapper.models
else: # "mdn"
xys = sampler.getyx(
yheader, xheader_continous, groupby=groupby_attribute)
# xys[groupby_attribute] = pd.to_numeric(xys[groupby_attribute], errors='coerce')
# xys=xys.dropna(subset=[yheader, xheader,groupby_attribute])
# n_total_point = get_group_count_from_table(
# original_data_file, groupby_attribute, sep=',',#self.config['csv_split_char'],
# headers=self.table_header)
if isinstance(ratio, str):
frequency_file = self.config.get_config()[
'warehousedir'] + "/" + ratio
# "/num_of_points.csv"
if os.path.exists(frequency_file):
n_total_point = get_group_count_from_summary_file(
frequency_file, sep=',')
n_total_point_sample, xys = sampler.get_groupby_frequency_data()
n_total_point["if_contain_x_categorical"] = n_total_point_sample["if_contain_x_categorical"]
else:
raise FileNotFoundError(
"scaling factor should come from the " +
ratio + " in the warehouse folder, as"
" stated in the SQL. However, the file is not found.")
else:
n_total_point, xys = sampler.get_groupby_frequency_data()
# print(n_total_point)
# for cases when the data file is treated as a sample, we need to scale up the frequency for each group.
if ratio > 1:
file_size = sampler.n_total_point
ratio = float(ratio)/file_size
# if 0 < ratio < 1:
scaled_n_total_point = {}
if "if_contain_x_categorical" in n_total_point:
scaled_n_total_point["if_contain_x_categorical"] = n_total_point.pop(
"if_contain_x_categorical")
if "categorical_distinct_values" in n_total_point:
scaled_n_total_point["categorical_distinct_values"] = n_total_point.pop(
"categorical_distinct_values")
if "x_categorical_columns" in n_total_point:
scaled_n_total_point["x_categorical_columns"] = n_total_point.pop(
"x_categorical_columns")
for key in n_total_point:
# print("key", key, n_total_point[key])
if not isinstance(n_total_point[key], dict):
scaled_n_total_point[key] = n_total_point[key]/ratio
else:
scaled_n_total_point[key] = {}
for sub_key in n_total_point[key]:
scaled_n_total_point[key][sub_key] = n_total_point[key][sub_key]/ratio
n_total_point = scaled_n_total_point
# print("scaled_n_total_point", scaled_n_total_point)
# no categorical x attributes
if not n_total_point['if_contain_x_categorical']:
if not self.config.get_config()["b_use_gg"]:
n_total_point.pop(
"if_contain_x_categorical")
# xys.pop("if_contain_x_categorical")
kdeModelWrapper = KdeModelTrainer(
mdl, tbl, xheader_continous[0], yheader,
groupby_attribute=groupby_attribute,
groupby_values=list(
n_total_point.keys()),
n_total_point=n_total_point,
x_min_value=-np.inf, x_max_value=np.inf,
config=self.config.copy()).fit_from_df(
xys["data"], self.runtime_config, network_size=None)
qe_mdn = MdnQueryEngine(
kdeModelWrapper, config=self.config.copy())
qe_mdn.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
# kdeModelWrapper.serialize2warehouse()
self.model_catalog.add_model_wrapper(
qe_mdn, self.runtime_config)
else:
# print("n_total_point ", n_total_point)
queryEngineBundle = MdnQueryEngineGoGs(
config=self.config.copy()).fit(xys["data"], groupby_attribute,
n_total_point, mdl, tbl,
xheader_continous[0], yheader,
self.runtime_config) # n_per_group=n_per_gg,n_mdn_layer_node = n_mdn_layer_node,encoding = encoding,b_grid_search = b_grid_search
self.model_catalog.add_model_wrapper(
queryEngineBundle, self.runtime_config)
queryEngineBundle.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
else: # x has categorical attributes
# if not self.config.get_config()["b_use_gg"]:
# use a single model to support categorical conditions.
if self.config.config["one_model"]:
qe = MdnQueryEngineXCategoricalOneModel(
self.config.copy())
usecols = {
"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": groupby_attribute}
useCols = UseCols(usecols)
# get the training data from samples.
gbs, xs, ys = useCols.get_gb_x_y_cols_for_one_model()
gbs_data, xs_data, ys_data = sampler.sample.get_columns_from_original_sample(
gbs, xs, ys)
n_total_point = sampler.sample.get_frequency_of_categorical_columns_for_gbs(
groupby_attribute, xheader_categorical)
# print("n_total_point-----------before",
# n_total_point)
# print("ratio is ", ratio)
scaled_n_total_point = {}
for key in n_total_point:
scaled_n_total_point[key] = {}
for sub_key in n_total_point[key]:
scaled_n_total_point[key][sub_key] = n_total_point[key][sub_key]/ratio
n_total_point = scaled_n_total_point
# print("n_total_point-----------after",
# n_total_point)
# raise
qe.fit(mdl, tbl, gbs_data, xs_data, ys_data, n_total_point, usecols=usecols,
runtime_config=self.runtime_config)
else:
qe = MdnQueryEngineXCategorical(
self.config.copy())
qe.fit(mdl, tbl, xys, n_total_point, usecols={
"y": yheader, "x_continous": xheader_continous,
"x_categorical": xheader_categorical, "gb": groupby_attribute}, runtime_config=self.runtime_config
) # device=device, encoding=encoding, b_grid_search=b_grid_search
qe.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe, self.runtime_config)
# else:
# raise ValueError(
# "GoG support for categorical attributes is not supported.")
qe.serialize2warehouse(
self.config.get_config()['warehousedir'], self.runtime_config)
self.model_catalog.add_model_wrapper(
qe, self.runtime_config)
time2 = datetime.now()
t = (time2 - time1).seconds
if self.runtime_config['b_show_latency']:
print("time cost: " + str(t) + "s.")
print("------------------------")
# rest config
self.last_config = None
return
elif sql_type == "select": # process SELECT query
start_time = datetime.now()
predictions = None
# DML, provide the prediction using models
mdl = self.parser.get_from_name()
gb_to_print, [
func, yheader, distinct_condition] = self.parser.get_dml_aggregate_function_and_variable()
if self.parser.if_where_exists():
print("OK")
where_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# xheader, x_lb, x_ub = self.parser.get_dml_where_categorical_equal_and_range()
model = self.model_catalog.model_catalog[mdl +
self.runtime_config["model_suffix"]]
x_header_density = model.density_column
# print("where_conditions", where_conditions)
[x_lb, x_ub] = [where_conditions[2][x_header_density][i]
for i in [0, 1]]
filter_dbest = dict(where_conditions[2])
filter_dbest = [filter_dbest[next(iter(filter_dbest))][i]
for i in [0, 1]]
# print("where_conditions",where_conditions)
# print("filter_dbest",filter_dbest)
predictions = model.predicts(func, x_lb, x_ub, where_conditions,
self.runtime_config, groups=None, filter_dbest=filter_dbest)
# predictions = model.predict_one_pass(
# func, x_lb, x_ub, n_jobs=n_jobs)
elif func == "var":
print("var!!")
model = self.model_catalog.model_catalog[mdl +
self.runtime_config["model_suffix"]]
x_header_density = model.density_column
# print(x_header_density)
predictions = model.predicts("var",runtime_config=self.runtime_config)
# return predictions
else:
print(
"support for query without where clause is not implemented yet! abort!")
return
# if not self.parser.if_contain_groupby(): # if group by is not involved in the query
# simple_model_wrapper = self.model_catalog.model_catalog[get_pickle_file_name(
# mdl)]
# reg = simple_model_wrapper.reg
# density = simple_model_wrapper.density
# n_sample_point = int(simple_model_wrapper.n_sample_point)
# n_total_point = int(simple_model_wrapper.n_total_point)
# x_min_value = float(simple_model_wrapper.x_min_value)
# x_max_value = float(simple_model_wrapper.x_max_value)
# query_engine = QueryEngine(reg, density, n_sample_point,
# n_total_point, x_min_value, x_max_value,
# self.config)
# p, t = query_engine.predict(func, x_lb=x_lb, x_ub=x_ub)
# print("OK")
# print(p)
# if self.config.get_config()['verbose']:
# print("time cost: " + str(t))
# print("------------------------")
# return p, t
# else: # if group by is involved in the query
# if self.config.get_config()['reg_type'] == "qreg":
# start = datetime.now()
# predictions = {}
# groupby_attribute = self.parser.get_groupby_value()
# groupby_key = mdl + "_groupby_" + groupby_attribute
# for group_value, model_wrapper in self.model_catalog.model_catalog[groupby_key].items():
# reg = model_wrapper.reg
# density = model_wrapper.density
# n_sample_point = int(model_wrapper.n_sample_point)
# n_total_point = int(model_wrapper.n_total_point)
# x_min_value = float(model_wrapper.x_min_value)
# x_max_value = float(model_wrapper.x_max_value)
# query_engine = QueryEngine(reg, density, n_sample_point, n_total_point, x_min_value,
# x_max_value,
# self.config)
# predictions[model_wrapper.groupby_value] = query_engine.predict(
# func, x_lb=x_lb, x_ub=x_ub)[0]
# print("OK")
# for key, item in predictions.items():
# print(key, item)
# else: # use mdn models to give the predictions.
# start = datetime.now()
# # predictions = {}
# groupby_attribute = self.parser.get_groupby_value()
# # no categorical x attributes
# # x_categorical_attributes, x_categorical_values, x_categorical_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# x_categorical_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# # no x categrical attributes, use a single model to predict.
# if not x_categorical_conditions[0]:
# if not self.config.get_config()["b_use_gg"]:
# # qe_mdn = MdnQueryEngine(self.model_catalog.model_catalog[mdl + ".pkl"],
# # self.config)
# where_conditions = self.parser.get_dml_where_categorical_equal_and_range()
# # xheader, x_lb, x_ub = self.parser.get_dml_where_categorical_equal_and_range()
# qe_mdn = self.model_catalog.model_catalog[mdl + ".pkl"]
# x_header_density = qe_mdn.density_column
# [x_lb, x_ub] = [where_conditions[2][x_header_density][i]
# for i in [0, 1]]
# print("OK")
# predictions = qe_mdn.predict_one_pass(func, x_lb=x_lb, x_ub=x_ub,
# n_jobs=n_jobs, ) # result2file=result2file,n_division=n_division
# else:
# qe_mdn = self.model_catalog.model_catalog[mdl + ".pkl"]
# # qe_mdn = MdnQueryEngine(qe_mdn, self.config)
# print("OK")
# predictions = qe_mdn.predicts(func, x_lb=x_lb, x_ub=x_ub,
# n_jobs=n_jobs, )
# else:
# pass
# # print("OK")
# # if not self.config.get_config()["b_use_gg"]:
# # # print("x_categorical_values",
# # # x_categorical_values)
# # # print(",".join(x_categorical_values))
# # filter_dbest = self.parser.get_filter()
# # self.model_catalog.model_catalog[mdl + '.pkl'].predicts(
# # func, x_lb, x_ub, x_categorical_conditions, n_jobs=1, filter_dbest=filter_dbest) # ",".join(x_categorical_values)
# # else:
# # pass
if self.runtime_config['b_show_latency']:
end_time = datetime.now()
time_cost = (end_time - start_time).total_seconds()
print("Time cost: %.4fs." % time_cost)
print("------------------------")
return predictions
elif sql_type == "set": # process SET query
if self.last_config:
self.config = self.last_config
else:
self.config = DbestConfig()
try:
key, value = self.parser.get_set_variable_value()
if key in self.config.get_config():
# check variable value before assignment
if key.lower() == "encoder":
value = value.lower()
if value not in ["onehot", "binary", "embedding"]:
value = "binary"
print(
"encoder is not set to a proper value, use default encoding type: binary.")
self.config.get_config()[key] = value
print("OK, " + key + " is updated.")
else: # if variable is within runtime_config
# check if "device" is set. we need to make usre when GPU is not availabe, cpu is used instead.
if key.lower() == "device":
value = value.lower()
if value in ["cpu", "gpu"]:
if torch.cuda.is_available():
if value == "gpu":
value = "cuda:0"
try:
set_start_method_torch('spawn')
except RuntimeError:
print("Fail to set start method as spawn for pytorch multiprocessing, " +
"use default in advance. (see queryenginemdn "
"for more info.)")
else:
set_start_method_cpu("spawn")
if self.runtime_config["v"]:
print("device is set to " + value)
else:
if value == "gpu":
print(
"GPU is not available, use CPU instead")
value = "cpu"
if value == "cpu":
if self.runtime_config["v"]:
print("device is set to " + value)
else:
print("Only GPU or CPU is supported.")
return
self.runtime_config[key] = value
if key in self.runtime_config:
print("OK, " + key + " is updated.")
else:
print("OK, local variable "+key+" is defined.")
except TypeError:
# self.parser.get_set_variable_value() does not return correctly
print("Parameter is not changed. Please check your SQL!")
# save the config
self.last_config = self.config
return
elif sql_type == "drop": # process DROP query
model_name = self.parser.drop_get_model()
model_path = os.path.join(self.config.get_config(
)["warehousedir"], model_name+self.runtime_config["model_suffix"])
if os.path.isfile(model_path):
os.remove(model_path)
print("OK. model is dropped.")
return True
else:
print("Model does not exist!")
return False
elif sql_type == "show":
print("OK")
t_start = datetime.now()
if self.runtime_config['b_print_to_screen']:
for key in self.model_catalog.model_catalog:
print(key.replace(
self.runtime_config["model_suffix"], ''))
if self.runtime_config["v"]:
t_end = datetime.now()
time_cost = (t_end - t_start).total_seconds()
print("Time cost: %.4fs." % time_cost)
else:
print("Unsupported query type, please check your SQL.")
return
def set_table_counts(self, dic):
self.n_total_records = dic
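# Standalone sketch (not part of SqlExecutor) of the frequency scaling performed in the
# "create" branch above: when the training file is itself a sample, each per-group (and
# per-category) count is divided by the sampling ratio to estimate full-table counts.
# Bookkeeping keys such as "if_contain_x_categorical" are assumed to be popped beforehand.
def scale_group_frequencies(n_total_point, ratio):
    scaled = {}
    for key, value in n_total_point.items():
        if isinstance(value, dict):
            scaled[key] = {sub: cnt / ratio for sub, cnt in value.items()}
        else:
            scaled[key] = value / ratio
    return scaled
# e.g. scale_group_frequencies({"g1": 10, "g2": {"a": 4, "b": 6}}, ratio=0.01)
# -> {"g1": 1000.0, "g2": {"a": 400.0, "b": 600.0}}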
| {"hexsha": "304baccbdd51d088b1fb4c7c1121c5354a95daa9", "size": 36864, "ext": "py", "lang": "Python", "max_stars_repo_path": "dbestclient/executor/executor.py", "max_stars_repo_name": "qingzma/DBEst_MDN", "max_stars_repo_head_hexsha": "3a3e26bede308b70abfad07032dc16a07a170f34", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T08:01:08.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T08:01:08.000Z", "max_issues_repo_path": "dbestclient/executor/executor.py", "max_issues_repo_name": "qingzma/DBEst_MDN", "max_issues_repo_head_hexsha": "3a3e26bede308b70abfad07032dc16a07a170f34", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dbestclient/executor/executor.py", "max_forks_repo_name": "qingzma/DBEst_MDN", "max_forks_repo_head_hexsha": "3a3e26bede308b70abfad07032dc16a07a170f34", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-09-28T15:39:12.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-11T11:07:54.000Z", "avg_line_length": 56.7138461538, "max_line_length": 197, "alphanum_fraction": 0.4759657118, "include": true, "reason": "import numpy", "num_tokens": 6459} |
[STATEMENT]
lemma [code]:
\<open>unset_bit 0 m = 2 * (m div 2)\<close>
\<open>unset_bit (Suc n) m = m mod 2 + 2 * unset_bit n (m div 2)\<close> for m :: natural
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. unset_bit 0 m = 2 * (m div 2) &&& unset_bit (Suc n) m = m mod 2 + 2 * unset_bit n (m div 2)
[PROOF STEP]
by (transfer; simp add: unset_bit_Suc)+ | {"llama_tokens": 174, "file": null, "length": 1} |
# Author: sihan
# Date: 2018-10-26
import os # dealing with directories
from random import shuffle  # for mixing up our currently ordered data, which might otherwise lead the network astray in training
import cv2 # working with, mainly resizing, images
import numpy as np # dealing with arrays
from tqdm import tqdm  # a nice pretty percentage bar for tasks. Thanks to viewer Daniel Bühler for this suggestion
# TRAIN_DIR = 'E:\Project\Ai_Club\dataSets\\train'
TEST_DIR = 'E:\Project\Ai_Club\dataSets\\test'
TRAIN_DIR = r'E:\Project\dataSet\all\train'
# TEST_DIR = r'E:\Project\dataSet\all\test'
IMG_SIZE = 64
def label_img(img):
word_label = img.split('.')[-3]
# conversion to one-hot array [cat,dog]
# [much cat, no dog]
if word_label == 'cat':
return [1, 0]
# [no cat, very doggo]
elif word_label == 'dog':
return [0, 1]
def create_train_data(numFile):
training_data = []
for img in tqdm(os.listdir(TRAIN_DIR)):
label = label_img(img)
path = os.path.join(TRAIN_DIR, img)
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
training_data.append([np.array(img), np.array(label)])
shuffle(training_data)
fileName = './data/train_data_' + str(IMG_SIZE) + "_" + str(numFile) + '.npy'
np.save(fileName, training_data)
return training_data
def create_test_data(numFile):
test_data = []
for img in tqdm(os.listdir(TEST_DIR)):
label = label_img(img)
path = os.path.join(TEST_DIR, img)
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
test_data.append([np.array(img), np.array(label)])
shuffle(test_data)
fileName = './data/test_data_' + str(IMG_SIZE) + "_" + str(numFile) + '.npy'
np.save(fileName, test_data)
return test_data
def process_test_data(numFile):
testing_data = []
for img in tqdm(os.listdir(TEST_DIR)):
path = os.path.join(TEST_DIR, img)
img_num = img.split('.')[0]
img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
img = cv2.resize(img, (IMG_SIZE, IMG_SIZE))
testing_data.append([np.array(img), img_num])
shuffle(testing_data)
np.save('./data/test_data_proc_' + str(numFile) + "_" + str(IMG_SIZE) + '.npy', testing_data)
return testing_data
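# Sketch of how the saved arrays might be consumed downstream (not part of this script);
# the file path and the (IMG_SIZE, IMG_SIZE, 1) grayscale reshape are assumptions.
def load_training_arrays(path='./data/train_data_64_25000.npy'):
    data = np.load(path, allow_pickle=True)
    X = np.array([item[0] for item in data]).reshape(-1, IMG_SIZE, IMG_SIZE, 1)
    y = np.array([item[1] for item in data])   # one-hot [cat, dog] labels
    return X, y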
# test_data = process_test_data(12500)
# train_data = create_train_data(25000)
test_data2 = create_test_data(2000)
| {"hexsha": "6a30a7c073fb978f3088295714f65c1fa179738d", "size": 2525, "ext": "py", "lang": "Python", "max_stars_repo_path": "classification/make_test_set.py", "max_stars_repo_name": "Sihan-Son/animal_classification", "max_stars_repo_head_hexsha": "e0fcee46c2a31ef35dfd6f84849259474a92101b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-04T04:03:41.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-04T04:03:41.000Z", "max_issues_repo_path": "classification/make_test_set.py", "max_issues_repo_name": "Sihan-Son/animal_classification", "max_issues_repo_head_hexsha": "e0fcee46c2a31ef35dfd6f84849259474a92101b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2018-09-03T00:10:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-09T23:56:14.000Z", "max_forks_repo_path": "classification/make_test_set.py", "max_forks_repo_name": "Sihan-Son/animal_classification", "max_forks_repo_head_hexsha": "e0fcee46c2a31ef35dfd6f84849259474a92101b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-09-04T04:03:42.000Z", "max_forks_repo_forks_event_max_datetime": "2018-09-04T04:03:42.000Z", "avg_line_length": 33.2236842105, "max_line_length": 118, "alphanum_fraction": 0.6499009901, "include": true, "reason": "import numpy", "num_tokens": 660} |
action(::Type{PointerMovesEvent}) = PointerMoves
action(::Type{PointerLeavesWindowEvent}) = PointerLeavesWindow
action(::Type{PointerEntersWindowEvent}) = PointerEntersWindow
action(::Type{ExposeEvent}) = Expose
action(::Type{ResizeEvent}) = Resize
action(::Type{<:MouseEvent{A}}) where {A} = A
action(::Type{<:KeyEvent{A}}) where {A} = A
action(::Type{<:EventDetails{T}}) where {T} = action(T)
action(ed::EventDetails) = action(typeof(ed))
action(ed::EventData) = action(typeof(ed))
callback_symbol(::Type{Resize}) = :on_resize
callback_symbol(::Type{ButtonPressed}) = :on_mouse_button_pressed
callback_symbol(::Type{ButtonReleased}) = :on_mouse_button_released
callback_symbol(::Type{KeyPressed}) = :on_key_pressed
callback_symbol(::Type{KeyReleased}) = :on_key_released
callback_symbol(::Type{Expose}) = :on_expose
callback_symbol(::Type{PointerEntersWindow}) = :on_pointer_enter
callback_symbol(::Type{PointerLeavesWindow}) = :on_pointer_leave
callback_symbol(::Type{PointerMoves}) = :on_pointer_move
callback(callbacks::Callbacks, T) = getproperty(callbacks, callback_symbol(action(T)))
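# For example, if `T` is `KeyEvent{KeyPressed}`, `action(T)` gives `KeyPressed`,
# `callback_symbol` maps it to `:on_key_pressed`, and this resolves to the
# `on_key_pressed` field of the `Callbacks` struct.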
function execute_callback(wm::AbstractWindowManager, ed::EventDetails)
execute_callback(callback(callbacks(wm, ed.win), typeof(ed)), (ed,))
end
function execute_callback(cb::Function, args::Tuple)
cb(args...)
end
execute_callback(cb::Nothing, args::Tuple) = nothing
"""
run(window_manager, mode; kwargs...)
Run an event loop associated with the `window_manager` in a synchronous or asynchronous fashion.
"""
Base.run(W::AbstractWindowManager, ::ExecutionMode) = not_implemented_for(W)
| {"hexsha": "846885f21dd540464ccc596278fbf0ea7e78a1bb", "size": 1596, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/events.jl", "max_stars_repo_name": "serenity4/WindowAbstractions.jl", "max_stars_repo_head_hexsha": "d6547be67c820315d44315efaa673849e3dc2afa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/events.jl", "max_issues_repo_name": "serenity4/WindowAbstractions.jl", "max_issues_repo_head_hexsha": "d6547be67c820315d44315efaa673849e3dc2afa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-25T15:58:12.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-11T20:43:03.000Z", "max_forks_repo_path": "src/events.jl", "max_forks_repo_name": "serenity4/WindowAbstractions.jl", "max_forks_repo_head_hexsha": "d6547be67c820315d44315efaa673849e3dc2afa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.9, "max_line_length": 96, "alphanum_fraction": 0.765037594, "num_tokens": 403} |
# ---
# jupyter:
# jupytext:
# formats: ipynb,py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python [conda env:bandit_38]
# language: python
# name: conda-env-bandit_38-py
# ---
# %% language="javascript"
# IPython.notebook.kernel.restart()
# %%
import datetime
import numpy as np
import xarray as xr
from Bandit.model_output import ModelOutput
# %%
workdir = '/Users/pnorton/tmp'
filename = f'{workdir}/seg_gwflow.nc'
filename_rain = f'{workdir}/hru_rain.nc'
st_date = datetime.datetime(1980, 10, 1)
en_date = datetime.datetime(1980, 10, 31)
# the_segs = [49153, 49154, 49155, 49156, 49157]
the_segs = [49153]
# %%
modout = ModelOutput(filename=filename, varname='seg_gwflow', startdate=st_date, enddate=en_date, nhm_segs=the_segs)
# %%
modout.get_var('seg_gwflow').head()
# %%
modout.dataset['seg_gwflow'].to_pandas().head()
# %%
# aa = xr.open_dataset(filename, decode_coords=True, chunks={'nsegment': 1000})
aa = xr.open_dataset(filename_rain, decode_coords=True, chunks={'nhru': 1000})
# %%
aa
# %%
# da.sel(a=da.c.to_index().get_indexer(['x', 'y']))
# aa[seg_gwflow].sel(nsegment=)
a = aa['seg_gwflow']  # NOTE: this cell assumes `aa` was opened from `filename` (seg_gwflow.nc) above, not filename_rain
# a.sel(country=a.currency == 'EUR')
a.sel(nsegment=a.seg_id == 49154).to_pandas()
# %%
# In [63]: da = xr.DataArray(np.random.rand(3,2), dims=list('ab'), coords={'c':(('a',),list('xyz'))})
# In [64]: da.sel(a=(np.isin(da.c, list('xy'))))
a.sel(nsegment=(np.isin(a.seg_id, [49157, 49154]))).to_pandas()
# %%
# %%
# %%
a
# %%
b = aa['seg_id']
# b.sel(nsegment=b.seg_id in [49154, 49157]).to_pandas()
# data = self.__dataset[varname].loc[:, self.__nhm_hrus].to_pandas()
b.loc[[49154, 49157]].to_pandas()
# %%
# Get the indices for the NHM (global) ids
cc = b.to_index().get_indexer([49157, 49154])
# %%
dd = a.loc[:, cc].to_pandas()
# %%
dd
# %%
ee = a.loc[:, cc]
# %%
ee
# %%
# da.assign_coords(lon=(((da.lon + 180) % 360) - 180))
# aa = xr.open_dataset(filename, decode_coords=True, chunks={'nsegment': 1000})
# aa = xr.open_dataset(filename, chunks={'nsegment': 1000})
aa = xr.open_dataset(filename_rain, decode_coords=True, chunks={'nhru': 1000})
aa
# %%
ba = aa.assign_coords(nhru=(aa.nhm_id))
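# With nhm_id assigned as the coordinate for the nhru dimension, label-based
# selection such as ba['hru_rain'].loc[:, [101, 102]] in the next cell picks
# HRUs by their NHM ids instead of by positional index.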
# %%
dd = ba['hru_rain'].loc[:, [101, 102]].to_pandas()
# %%
dd
# %%
ba
# %%
list(range(1,11))
# %%
| {"hexsha": "0d0570e41bef2b304b4330aca95f2516a55a4668", "size": 2394, "ext": "py", "lang": "Python", "max_stars_repo_path": "04_Bandit/subset_model_output_work.py", "max_stars_repo_name": "pnorton-usgs/notebooks", "max_stars_repo_head_hexsha": "17a38ecd3f3c052b9bd785c2e53e16a9082d1e71", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "04_Bandit/subset_model_output_work.py", "max_issues_repo_name": "pnorton-usgs/notebooks", "max_issues_repo_head_hexsha": "17a38ecd3f3c052b9bd785c2e53e16a9082d1e71", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "04_Bandit/subset_model_output_work.py", "max_forks_repo_name": "pnorton-usgs/notebooks", "max_forks_repo_head_hexsha": "17a38ecd3f3c052b9bd785c2e53e16a9082d1e71", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.95, "max_line_length": 116, "alphanum_fraction": 0.6399331662, "include": true, "reason": "import numpy", "num_tokens": 807} |
handle("gotosymbol") do data
@destruct [
word,
path || nothing,
# local context
column || 1,
row || 1,
startRow || 0,
context || "",
onlyGlobal || true,
# module context
mod || "Main",
text || "",
] = data
gotosymbol(
word, path,
column, row, startRow, context, onlyGlobal,
mod, text,
)
end
function gotosymbol(
word, path = nothing,
# local context
column = 1, row = 1, startrow = 0, context = "", onlyglobal = false,
# module context
mod = "Main", text = ""
)
try
# local goto
if !onlyglobal
localitems = localgotoitem(word, path, column, row, startrow, context)
isempty(localitems) || return Dict(
:error => false,
:items => todict.(localitems)
)
end
# global goto
globalitems = globalgotoitems(word, getmodule(mod), path, text)
isempty(globalitems) || return Dict(
:error => false,
:items => todict.(globalitems),
)
catch err
return Dict(:error => true)
end
return Dict(:error => true) # nothing hits
end
struct GotoItem
text::String
file::String
line::Int
secondary::String
GotoItem(text, file, line = 0, secondary = "") = new(text, normpath(file), line, secondary)
end
todict(gotoitem::GotoItem) = Dict(
:text => gotoitem.text,
:file => gotoitem.file,
:line => gotoitem.line,
:secondary => gotoitem.secondary,
)
### local goto
function localgotoitem(word, path, column, row, startrow, context)
word = first(split(word, '.')) # always ignore dot accessors
position = row - startrow
ls = locals(context, position, column)
filter!(ls) do l
l[:name] == word &&
l[:line] < position
end
map(ls) do l # there should be zero or one element in `ls`
text = l[:name]
line = startrow + l[:line] - 1
GotoItem(text, path, line)
end
end
localgotoitem(word, ::Nothing, column, row, startrow, context) = [] # when called from docpane/workspace
### global goto - bundles toplevel gotos & method gotos
function globalgotoitems(word, mod, path, text)
# strip a dot-accessed module if exists
identifiers = split(word, '.')
head = string(identifiers[1])
if head ≠ word && (nextmod = getfield′(mod, head)) isa Module
# if `head` is a module, update `word` and `mod`
nextword = join(identifiers[2:end], '.')
        return globalgotoitems(nextword, nextmod, path, text)
end
val = getfield′(mod, word)
val isa Module && return [GotoItem(val)] # module goto
items = toplevelgotoitems(word, mod, path, text)
# append method gotos that are not caught by `toplevelgotoitems`
ml = methods(val)
files = map(item -> item.file, items)
methoditems = filter!(item -> item.file ∉ files, methodgotoitems(ml))
append!(items, methoditems)
end
## module goto
function GotoItem(mod::Module)
file, line = mod == Main ? MAIN_MODULE_LOCATION[] : moduledefinition(mod)
GotoItem(string(mod), file, line - 1)
end
## toplevel goto
const PathItemsMaps = Dict{String, Vector{ToplevelItem}}
"""
Atom.SYMBOLSCACHE
"module" (`String`) ⟶ "path" (`String`) ⟶ "symbols" (`Vector{ToplevelItem}`) map.
!!! note
"module" should be canonical, i.e.: should be identical to names that are
constructed from `string(mod::Module)`.
"""
const SYMBOLSCACHE = Dict{String, PathItemsMaps}()
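# Illustrative (hypothetical paths) shape of one cache entry:
#   SYMBOLSCACHE["Example"] == PathItemsMaps(
#       "/path/to/Example/src/Example.jl" => ToplevelItem[...],
#       "/path/to/Example/src/helpers.jl" => ToplevelItem[...],
#   )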
function toplevelgotoitems(word, mod, path, text)
key = string(mod)
pathitemsmaps = if haskey(SYMBOLSCACHE, key)
SYMBOLSCACHE[key]
else
SYMBOLSCACHE[key] = collecttoplevelitems(mod, path, text) # caching
end
ismacro(word) && (word = lstrip(word, '@'))
ret = []
for (path, items) in pathitemsmaps
for item in filter(item -> filtertoplevelitem(word, item), items)
push!(ret, GotoItem(path, item))
end
end
return ret
end
# entry methods
function collecttoplevelitems(mod::Module, path::String, text::String)
return if mod == Main || isuntitled(path)
# for `Main` module and unsaved editors, always use CSTPraser-based approach
# with a given buffer text, and don't check module validity
__collecttoplevelitems(nothing, path, text)
else
_collecttoplevelitems(mod)
end
end
# when `path === nothing`, e.g.: called from docpane/workspace
collecttoplevelitems(mod::Module, path::Nothing, text::String) = _collecttoplevelitems(mod)
function _collecttoplevelitems(mod::Module)
entrypath, paths = modulefiles(mod)
return if entrypath !== nothing # Revise-like approach
__collecttoplevelitems(stripdotprefixes(string(mod)), [entrypath; paths])
else # if Revise-like approach fails, fallback to CSTParser-based approach
entrypath, line = moduledefinition(mod)
__collecttoplevelitems(stripdotprefixes(string(mod)), entrypath)
end
end
# module-walk via Revise-like approach
function __collecttoplevelitems(mod::Union{Nothing, String}, paths::Vector{String})
pathitemsmaps = PathItemsMaps()
entrypath, paths = paths[1], paths[2:end]
# ignore toplevel items outside of `mod`
items = toplevelitems(read(entrypath, String); mod = mod)
push!(pathitemsmaps, entrypath => items)
# collect symbols in included files (always in `mod`)
for path in paths
items = toplevelitems(read(path, String); mod = mod, inmod = true)
push!(pathitemsmaps, path => items)
end
pathitemsmaps
end
# module-walk based on CSTParser, looking for toplevel `included` calls
function __collecttoplevelitems(mod::Union{Nothing, String}, entrypath::String, pathitemsmaps::PathItemsMaps = PathItemsMaps(); inmod = false)
isfile′(entrypath) || return
text = read(entrypath, String)
__collecttoplevelitems(mod, entrypath, text, pathitemsmaps; inmod = inmod)
end
function __collecttoplevelitems(mod::Union{Nothing, String}, entrypath::String, text::String, pathitemsmaps::PathItemsMaps = PathItemsMaps(); inmod = false)
items = toplevelitems(text; mod = mod, inmod = inmod)
push!(pathitemsmaps, entrypath => items)
# looking for toplevel `include` calls
for item in items
if item isa ToplevelCall
expr = item.expr
if isinclude(expr)
nextfile = expr.args[3].val
nextentrypath = joinpath(dirname(entrypath), nextfile)
isfile′(nextentrypath) || continue
# `nextentrypath` is always in `mod`
__collecttoplevelitems(mod, nextentrypath, pathitemsmaps; inmod = true)
end
end
end
pathitemsmaps
end
filtertoplevelitem(word, item::ToplevelItem) = false
function filtertoplevelitem(word, bind::ToplevelBinding)
bind = bind.bind
bind === nothing ? false : word == bind.name
end
function filtertoplevelitem(word, tupleh::ToplevelTupleH)
expr = tupleh.expr
for arg in expr.args
if str_value(arg) == word
return true
end
end
return false
end
function GotoItem(path::String, bind::ToplevelBinding)
expr = bind.expr
text = bind.bind.name
if CSTParser.has_sig(expr)
sig = CSTParser.get_sig(expr)
text = str_value(sig)
end
line = bind.lines.start - 1
secondary = string(path, ":", line + 1)
GotoItem(text, path, line, secondary)
end
function GotoItem(path::String, tupleh::ToplevelTupleH)
expr = tupleh.expr
text = str_value(expr)
line = tupleh.lines.start - 1
secondary = string(path, ":", line + 1)
GotoItem(text, path, line, secondary)
end
## update toplevel symbols cache
# NOTE: handled by the `updateeditor` handler in outline.jl
function updatesymbols(mod, path::Nothing, text) end # fallback case
function updatesymbols(mod, path::String, text)
m = getmodule(mod)
# initialize the cache if there is no previous one
if !haskey(SYMBOLSCACHE, mod)
SYMBOLSCACHE[mod] = collecttoplevelitems(m, path, text)
end
# ignore toplevel items outside of `mod` when `path` is an entry file
entrypath, _ = moduledefinition(m)
inmod = path != entrypath
items = toplevelitems(text; mod = stripdotprefixes(mod), inmod = inmod)
push!(SYMBOLSCACHE[mod], path => items)
end
## generate toplevel symbols cache
handle("regeneratesymbols") do
with_logger(JunoProgressLogger()) do
regeneratesymbols()
end
nothing
end
function regeneratesymbols()
id = "regenerate_symbols_progress"
@info "Generating symbols cache in loaded modules" progress=0 _id=id
loaded = Set(string.(Base.loaded_modules_array()))
pkgs = if isdefined(Pkg, :dependencies)
getfield.(values(Pkg.dependencies()), :name)
else
collect(keys(Pkg.installed()))
end
unloaded = filter(pkg -> pkg ∉ loaded, pkgs)
loadedlen = length(loaded)
unloadedlen = length(unloaded)
total = loadedlen + unloadedlen
for (i, mod) in enumerate(Base.loaded_modules_array())
try
key = string(mod)
key == "__PackagePrecompilationStatementModule" && continue # will cause error
@logmsg -1 "Symbols: $key ($i / $total)" progress=i/total _id=id
SYMBOLSCACHE[key] = _collecttoplevelitems(mod)
catch err
@error err
end
end
for (i, pkg) in enumerate(unloaded)
try
@logmsg -1 "Symbols: $pkg ($(i + loadedlen) / $total)" progress=(i+loadedlen)/total _id=id
path = Base.find_package(pkg)
SYMBOLSCACHE[pkg] = __collecttoplevelitems(pkg, path)
catch err
@error err
end
end
@info "Finished generating the symbols cache" progress=1 _id=id
end
## clear toplevel symbols cache
handle("clearsymbols") do
clearsymbols()
nothing
end
function clearsymbols()
for key in keys(SYMBOLSCACHE)
delete!(SYMBOLSCACHE, key)
end
end
## method goto
methodgotoitems(ml) = map(GotoItem, aggregatemethods(ml))
# aggregate methods with default arguments to the ones with full arguments
function aggregatemethods(ml)
ms = collect(ml)
sort!(ms, by = m -> m.nargs, rev = true)
unique(m -> (m.file, m.line), ms)
end
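# E.g. `f(x, y = 1) = x + y` defines two methods (`f(x)` and `f(x, y)`) at the
# same file:line; sorting by `nargs` descending and de-duplicating on
# (file, line) keeps only the full-signature method.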
function GotoItem(m::Method)
_, link = view(m)
sig = sprint(show, m)
text = replace(sig, methodloc_regex => s"\g<sig>")
file = link.file
line = link.line - 1
secondary = join(link.contents)
GotoItem(text, file, line, secondary)
end
| {"hexsha": "3c37ded889bc0be01578ebb43f67a406c5f37a34", "size": 9943, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/goto.jl", "max_stars_repo_name": "UnofficialJuliaMirror/Atom.jl-c52e3926-4ff0-5f6e-af25-54175e0327b1", "max_stars_repo_head_hexsha": "85c218fae87ae4352f47b0be28366c2daa6b9a5a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/goto.jl", "max_issues_repo_name": "UnofficialJuliaMirror/Atom.jl-c52e3926-4ff0-5f6e-af25-54175e0327b1", "max_issues_repo_head_hexsha": "85c218fae87ae4352f47b0be28366c2daa6b9a5a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/goto.jl", "max_forks_repo_name": "UnofficialJuliaMirror/Atom.jl-c52e3926-4ff0-5f6e-af25-54175e0327b1", "max_forks_repo_head_hexsha": "85c218fae87ae4352f47b0be28366c2daa6b9a5a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1671388102, "max_line_length": 156, "alphanum_fraction": 0.6880217238, "num_tokens": 2784} |
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision.datasets import MNIST
from torchvision.transforms import Compose, ToTensor, Normalize
from tqdm import tqdm
from ignite.engine import Engine, create_supervised_evaluator, create_supervised_trainer, Events
from ignite.metrics import Accuracy, Loss
import numpy as np
def get_data_loaders(train_batch_size, val_batch_size):
data_transform = Compose([ToTensor(), Normalize((0.1307,), (0.3081,))])
train_loader = DataLoader(MNIST(download=True, root=".", transform=data_transform, train=True),
batch_size=train_batch_size, shuffle=True)
val_loader = DataLoader(MNIST(download=False, root=".", transform=data_transform, train=False),
batch_size=val_batch_size, shuffle=False)
return train_loader, val_loader
class ConvNet(nn.Module):
def __init__(self):
super(ConvNet, self).__init__()
self.conv1 = nn.Conv2d(1, 32, kernel_size=3, padding=1)
self.conv2 = nn.Conv2d(32, 64, kernel_size=3, padding=1)
self.fc1 = nn.Linear(7*7*64, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2)
x = x.view(-1, 7*7*64)
x = self.fc1(x)
return F.log_softmax(x, dim=-1)
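# Shape sketch for a 28x28 MNIST input: conv1 + pool -> 32x14x14,
# conv2 + pool -> 64x7x7, flattened to 7*7*64 before the final linear layer,
# which is why fc1 expects 7*7*64 input features.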
batch_size = 64
lr = 1e-3
train_loader, val_loader = get_data_loaders(batch_size, batch_size)
model = ConvNet()
device = 'cuda'
optimizer = optim.Adam(model.parameters(), lr=lr)
trainer = create_supervised_trainer(model, optimizer, F.nll_loss, device=device)
evaluator = create_supervised_evaluator(
model=model,
metrics={'accuracy': Accuracy(),
'nll': Loss(F.nll_loss)},
device=device
)
desc = "ITERATION - loss: {:.2f}"
pbar = tqdm(
initial=0, leave=False, total=len(train_loader),
desc=desc.format(0)
)
@trainer.on(Events.ITERATION_COMPLETED)
def log_training_loss(engine):
iter = (engine.state.iteration - 1) % len(train_loader) + 1
pbar.desc = desc.format(engine.state.output)
pbar.update(1)
@trainer.on(Events.EPOCH_COMPLETED)
def log_train_metrics(engine):
pbar.refresh()
evaluator.run(train_loader)
metrics = evaluator.state.metrics
avg_accuracy = metrics['accuracy']
avg_nll = metrics['nll']
tqdm.write(
"Training Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, avg_accuracy, avg_nll))
@trainer.on(Events.EPOCH_COMPLETED)
def log_val_metrics(engine):
evaluator.run(val_loader)
metrics = evaluator.state.metrics
tqdm.write(
"Validation Results - Epoch: {} Avg accuracy: {:.2f} Avg loss: {:.2f}"
.format(engine.state.epoch, metrics['accuracy'], metrics['nll']))
pbar.n = pbar.last_print_n = 0
trainer.run(train_loader, max_epochs=10)
pbar.close()
| {"hexsha": "dc282927aaf512293a67f61b3c0f2ab7d036ac8d", "size": 2964, "ext": "py", "lang": "Python", "max_stars_repo_path": "ignite_examples/mnits_ignite.py", "max_stars_repo_name": "Daiver/Depth-regression", "max_stars_repo_head_hexsha": "cbc2a119915b4c350f0ad0b0a269aab520c03304", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ignite_examples/mnits_ignite.py", "max_issues_repo_name": "Daiver/Depth-regression", "max_issues_repo_head_hexsha": "cbc2a119915b4c350f0ad0b0a269aab520c03304", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ignite_examples/mnits_ignite.py", "max_forks_repo_name": "Daiver/Depth-regression", "max_forks_repo_head_hexsha": "cbc2a119915b4c350f0ad0b0a269aab520c03304", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.64, "max_line_length": 99, "alphanum_fraction": 0.6811740891, "include": true, "reason": "import numpy", "num_tokens": 756} |
import random
import numpy as np
import tensorflow as tf
from data_predication import features
def create_feature_sets_and_labels(features, test_size=0.3):
# shuffle out features and turn into np.array
random.shuffle(features)
features = np.array(features)
# split a portion of the features into tests
testing_size = int(test_size * len(features))
# create train and test lists
train_x = list(features[:, 0][:-testing_size])
train_y = list(features[:, 1][:-testing_size])
test_x = list(features[:, 0][-testing_size:])
test_y = list(features[:, 1][-testing_size:])
return train_x, train_y, test_x, test_y
train_x, train_y, test_x, test_y = create_feature_sets_and_labels(features)
# hidden layers and their nodes
n_nodes_hl1 = 20
n_nodes_hl2 = 20
# classes in our output
n_classes = 2
# iterations and batch-size to build out model
learning_times = 3000
learning_rate = 1
batch_size = 4
inputs = tf.placeholder(tf.float32)
outputs = tf.placeholder(tf.float32)
# random weights and bias for our layers
hidden_1_layer = {'f_fum': n_nodes_hl1,
'weight': tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum': n_nodes_hl2,
'weight': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias': tf.Variable(tf.random_normal([n_nodes_hl2]))}
output_layer = {'f_fum': None,
'weight': tf.Variable(tf.random_normal([n_nodes_hl2, n_classes])),
'bias': tf.Variable(tf.random_normal([n_classes])), }
# our predictive model's definition
def neural_network_model(input_data):
# hidden layer 1: (data * W) + b
l1 = tf.add(tf.matmul(input_data, hidden_1_layer['weight']), hidden_1_layer['bias'])
l1 = tf.sigmoid(l1)
# hidden layer 2: (hidden_layer_1 * W) + b
l2 = tf.add(tf.matmul(l1, hidden_2_layer['weight']), hidden_2_layer['bias'])
l2 = tf.sigmoid(l2)
# output: (hidden_layer_2 * W) + b
output_data = tf.matmul(l2, output_layer['weight']) + output_layer['bias']
return output_data
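# Shape sketch: input_data is (batch, len(train_x[0])), l1 and l2 are
# (batch, 20) given the 20-node hidden layers, and output_data is (batch, 2)
# to match the two classes.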
# training our model
def train_neural_network():
# use the model definition
prediction = neural_network_model(inputs)
# formula for cost (error)
loss = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=outputs))
# optimize for cost using GradientDescent
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
# Tensorflow session
with tf.Session() as sess:
tf.summary.FileWriter('log_ANN_graph', sess.graph)
# initialize our variables
sess.run(tf.global_variables_initializer())
# loop through specified number of iterations
for epoch in range(learning_times):
i = 0
# handle batch sized chunks of training data
for _ in train_x:
start = i
end = i + batch_size
batch_x = np.array(train_x[start:end])
batch_y = np.array(train_y[start:end])
_, c = sess.run([optimizer, loss], feed_dict={inputs: batch_x, outputs: batch_y})
i += batch_size
last_cost = c
if i >= len(train_x): break
# print cost updates along the way
if (epoch % (learning_times / 5)) == 0:
print('Epoch', epoch, 'completed out of', learning_times, 'cost:', last_cost)
# print accuracy of our model
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(outputs, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({inputs: test_x, outputs: test_y}))
output_weight = sess.run(output_layer['weight'])
output_bias = sess.run(output_layer['bias'])
# print predictions using our model
for i, t in enumerate(test_x):
print ('prediction for:', test_x[i])
output = prediction.eval(feed_dict={inputs: [test_x[i]]})
# normalize the prediction values
print(tf.sigmoid(output[0][0]).eval(), tf.sigmoid(output[0][1]).eval())
return output_weight, output_bias
output_weight, output_bias = train_neural_network()
print("final output_weight:\n{}".format(output_weight))
print("final output_bias: {}".format(batch_size))
| {"hexsha": "c7bc1e2db54716a428165b516020eea2adabd3dc", "size": 4444, "ext": "py", "lang": "Python", "max_stars_repo_path": "tf_predication.py", "max_stars_repo_name": "XinyueZ/tf", "max_stars_repo_head_hexsha": "c4e2492bbb27abc44db34521626fb27b199cd240", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tf_predication.py", "max_issues_repo_name": "XinyueZ/tf", "max_issues_repo_head_hexsha": "c4e2492bbb27abc44db34521626fb27b199cd240", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tf_predication.py", "max_forks_repo_name": "XinyueZ/tf", "max_forks_repo_head_hexsha": "c4e2492bbb27abc44db34521626fb27b199cd240", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.1846153846, "max_line_length": 97, "alphanum_fraction": 0.650540054, "include": true, "reason": "import numpy", "num_tokens": 1064} |
import unittest
from nextnanopy.shapes import GdsPolygonsRaw, units_factor, validate_unit
import numpy as np
import os
np_equal = np.testing.assert_almost_equal
folder = os.path.join('tests', 'gds')
class TestShapes(unittest.TestCase):
def test_unit(self):
self.assertEqual(units_factor['nm'], 1e-9)
self.assertEqual(units_factor['um'], 1e-6)
self.assertEqual(units_factor['mm'], 1e-3)
self.assertEqual(units_factor['m'], 1)
self.assertEqual(units_factor['si'], 1)
self.assertRaises(KeyError, validate_unit, 'none')
self.assertTrue(validate_unit('NM'))
def test_example0(self):
fullpath = os.path.join(folder, 'example0.gds')
gpols = GdsPolygonsRaw(fullpath, unit='nm')
self.assertEqual(gpols.fullpath, fullpath)
np_equal(gpols.labels, [0])
self.assertEqual(gpols.unit, 'nm')
self.assertEqual(gpols.nb_polygons, 1)
np_equal(gpols.xy[0][0], np.array([-500, -500, 500, 500]))
np_equal(gpols.xy[0][1], np.array([-500, 500, 500, -500]))
gpols.unit = 'm'
self.assertEqual(gpols.unit, 'm')
self.assertEqual(gpols.nb_polygons, 1)
np_equal(gpols.xy[0][0], np.array([-500, -500, 500, 500]) * 1e-9)
np_equal(gpols.xy[0][1], np.array([-500, 500, 500, -500]) * 1e-9)
gpols.labels = [2]
np_equal(gpols.labels, [2])
gpols.labels = ['2']
self.assertEqual(gpols.labels, ['2'])
def test_example1(self):
fullpath = os.path.join(folder, 'example1.gds')
gpols = GdsPolygonsRaw(fullpath, unit='nm')
np_equal(gpols.labels, [0, 1, 2, 3])
self.assertEqual(gpols.nb_polygons, 4)
self.assertEqual(len(gpols.slices[0].slices), 2)
np_equal(gpols.xy[0][0], np.array([1500., 1500., 1975., 1975., 2025., 2025., 2500., 2500.]))
def test_example2(self):
fullpath = os.path.join(folder, 'example2.gds')
gpols = GdsPolygonsRaw(fullpath, unit='nm')
np_equal(gpols.labels, [0, 1])
self.assertEqual(gpols.nb_polygons, 2)
self.assertEqual(len(gpols.slices[0].slices), 3)
self.assertEqual(len(gpols.slices[1].slices), 17)
if __name__ == '__main__':
unittest.main()
| {"hexsha": "723acd1258c9b0bdd9b0c6bed693a4cd4c7e76bf", "size": 2245, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_shapes.py", "max_stars_repo_name": "nextnanopy/nextnanopy", "max_stars_repo_head_hexsha": "f28266d444f488726f16c9a4eb08e98720f5f683", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-12-01T15:32:40.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-12T06:36:12.000Z", "max_issues_repo_path": "tests/test_shapes.py", "max_issues_repo_name": "nextnanopy/nextnanopy", "max_issues_repo_head_hexsha": "f28266d444f488726f16c9a4eb08e98720f5f683", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-03-16T14:46:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-22T14:13:32.000Z", "max_forks_repo_path": "tests/test_shapes.py", "max_forks_repo_name": "nextnanopy/nextnanopy", "max_forks_repo_head_hexsha": "f28266d444f488726f16c9a4eb08e98720f5f683", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2021-07-06T07:25:47.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-12T06:36:18.000Z", "avg_line_length": 36.2096774194, "max_line_length": 100, "alphanum_fraction": 0.6262806236, "include": true, "reason": "import numpy", "num_tokens": 648} |
#include <fast_asio/fast_asio.hpp>
#include <boost/asio/ssl.hpp>
#include <iostream>
#include <memory>
#include "root_certificates.hpp"
using namespace boost::asio;
using namespace boost::asio::ip;
// socket type
typedef ssl::stream<tcp::socket> ssl_socket;
typedef fast_asio::packet_stream<ssl_socket> socket_t;
typedef std::shared_ptr<socket_t> socket_ptr;
ssl::context ctx{ssl::context::sslv23_client};
void onReceive(socket_ptr socket, boost::system::error_code ec, const_buffer* buf_begin, const_buffer* buf_end) {
if (ec) {
std::cout << "disconnected, reason:" << ec.message() << std::endl;
return ;
}
for (const_buffer* it = buf_begin; it != buf_end; ++it) {
const_buffer body = fast_asio::default_packet_policy::get_body(*it);
std::cout << "onReceive:" << std::string((const char*)body.data(), body.size()) << std::endl;
}
    // Close the connection
// boost::system::error_code ignore_ec;
// socket->shutdown(ignore_ec);
}
int main() {
load_root_certificates(ctx);
ctx.set_verify_mode(ssl::verify_none);
io_context ioc;
socket_ptr socket(new socket_t(ioc, ctx));
    // 1. Set the packet-splitting function (this is the default, so this line can be omitted)
socket->set_packet_splitter(&fast_asio::default_packet_policy::split);
    // 2. Connect
tcp::endpoint addr(address::from_string("127.0.0.1"), 1234);
socket->next_layer().next_layer().async_connect(addr,
[socket](boost::system::error_code ec) {
if (ec) {
std::cout << "connect error:" << ec.message() << std::endl;
return ;
}
std::cout << "connect success" << std::endl;
            // Handshake (done synchronously here for simplicity)
socket->next_layer().handshake(boost::asio::ssl::stream_base::handshake_type::client, ec);
if (ec) {
std::cout << "handshake error:" << ec.message() << std::endl;
return ;
}
            // 3. Connected successfully; start the read operation
socket->async_read_some(std::bind(&onReceive, socket,
std::placeholders::_1,
std::placeholders::_2,
std::placeholders::_3));
            // 4. Send one packet
char buf[] = "Hello fast_asio!";
std::string packet = fast_asio::default_packet_policy::serialize_to_string(buffer(buf, sizeof(buf)));
socket->async_write_some(buffer(packet), [](boost::system::error_code ec, size_t){
std::cout << "ping " << ec.message() << std::endl;
});
});
ioc.run();
std::cout << socket.use_count() << std::endl;
}
| {"hexsha": "9ce221fe5647587ee10dbad7d756aee9c9638dfe", "size": 2694, "ext": "cc", "lang": "C++", "max_stars_repo_path": "thirdparty/fast_asio/examples/ssl_client.cc", "max_stars_repo_name": "brinkqiang/dmlibgo", "max_stars_repo_head_hexsha": "f0bf5b1312f852fddab2050f58830bcc16dea03b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thirdparty/fast_asio/examples/ssl_client.cc", "max_issues_repo_name": "brinkqiang/dmlibgo", "max_issues_repo_head_hexsha": "f0bf5b1312f852fddab2050f58830bcc16dea03b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thirdparty/fast_asio/examples/ssl_client.cc", "max_forks_repo_name": "brinkqiang/dmlibgo", "max_forks_repo_head_hexsha": "f0bf5b1312f852fddab2050f58830bcc16dea03b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8536585366, "max_line_length": 117, "alphanum_fraction": 0.5631031923, "num_tokens": 666} |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
import cv2 as cv
import numpy as np
import matplotlib.pyplot as plt
import sys
filename = sys.argv[1]
#print(filename)
img = cv.imread(filename,0)
block_size = 9
constant = 2
k = np.ones((5,5),np.uint8)
blur = cv.GaussianBlur(img,(7,7),0)
fnoise=cv.medianBlur(blur,3)
th1 = cv.adaptiveThreshold(fnoise, 255, cv.ADAPTIVE_THRESH_MEAN_C, cv.THRESH_BINARY, block_size, constant)
th2 = cv.adaptiveThreshold (fnoise, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, block_size, constant)
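# Each pixel is compared against the mean (th1) or Gaussian-weighted mean (th2)
# of its 9x9 neighbourhood (block_size) minus `constant`; only th2 is used below.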
blu = cv.GaussianBlur(th2,(5,5),0)
fnois=cv.medianBlur(blu,3)
#erosion=cv.erode(fnois, k, iterations=1)
#dilation=cv.dilate(fnois, k, iterations=10)
#cv.imshow('img',dilation)
#cv.waitKey(0)
#cv.destroyAllWindows(
cv.imwrite('clearimage.png',fnois)
#plt.imshow(th2)
#plt.title('image')
#plt.show()
| {"hexsha": "247c76e76932e56a63cf97c5c82f20e8fb163d23", "size": 865, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/adaptive.py", "max_stars_repo_name": "SJSU272Spring2019/Project-Group-14", "max_stars_repo_head_hexsha": "10d1f3272ce6e9022b1843cf998f49fd02f18068", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-04T08:14:32.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-04T08:14:32.000Z", "max_issues_repo_path": "src/adaptive.py", "max_issues_repo_name": "SJSU272Spring2019/Project-Group-14", "max_issues_repo_head_hexsha": "10d1f3272ce6e9022b1843cf998f49fd02f18068", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/adaptive.py", "max_forks_repo_name": "SJSU272Spring2019/Project-Group-14", "max_forks_repo_head_hexsha": "10d1f3272ce6e9022b1843cf998f49fd02f18068", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-05-04T08:16:33.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-27T05:40:09.000Z", "avg_line_length": 28.8333333333, "max_line_length": 112, "alphanum_fraction": 0.710982659, "include": true, "reason": "import numpy", "num_tokens": 273} |
'''
Example implementations of HARK.ConsumptionSaving.ConsPortfolioModel
'''
from HARK.ConsumptionSaving.ConsPortfolioModel import PortfolioConsumerType, init_portfolio
from HARK.ConsumptionSaving.ConsIndShockModel import init_lifecycle
from HARK.utilities import plotFuncs
from copy import copy
from time import time
import numpy as np
import matplotlib.pyplot as plt
# Make and solve an example portfolio choice consumer type
print('Now solving an example portfolio choice problem; this might take a moment...')
MyType = PortfolioConsumerType()
MyType.cycles = 0
t0 = time()
MyType.solve()
t1 = time()
MyType.cFunc = [MyType.solution[t].cFuncAdj for t in range(MyType.T_cycle)]
MyType.ShareFunc = [MyType.solution[t].ShareFuncAdj for t in range(MyType.T_cycle)]
print('Solving an infinite horizon portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# Compute the Merton-Samuelson limiting portfolio share when returns are lognormal
MyType.RiskyVar = MyType.RiskyStd**2
MyType.RiskPrem = MyType.RiskyAvg - MyType.Rfree
def RiskyShareMertSamLogNormal(RiskPrem,CRRA,RiskyVar):
return RiskPrem/(CRRA*RiskyVar)
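# Quick numeric check of the limit (illustrative values only): with a risk
# premium of 0.04, CRRA of 5 and return variance of 0.04, the formula gives
# 0.04 / (5 * 0.04) = 0.2, i.e. a 20% limiting risky share.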
# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(MyType.cFunc[0], 0., 20.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([MyType.ShareFunc[0]
# ,lambda m: RiskyShareMertSamLogNormal(MyType.RiskPrem,MyType.CRRA,MyType.RiskyVar)*np.ones_like(m)
,lambda m: MyType.ShareLimit*np.ones_like(m)
] , 0., 200.)
# Now simulate this consumer type
MyType.track_vars = ['cNrmNow', 'ShareNow', 'aNrmNow', 't_age']
MyType.T_sim = 100
MyType.initializeSim()
MyType.simulate()
print('\n\n\n')
print('For derivation of the numerical limiting portfolio share')
print('as market resources approach infinity, see')
print('http://www.econ2.jhu.edu/people/ccarroll/public/lecturenotes/AssetPricing/Portfolio-CRRA/')
""
# Make another example type, but this one optimizes risky portfolio share only
# on the discrete grid of values implicitly chosen by RiskyCount, using explicit
# value maximization.
init_discrete_share = init_portfolio.copy()
init_discrete_share['DiscreteShareBool'] = True
init_discrete_share['vFuncBool'] = True # Have to actually construct value function for this to work
# Make and solve a discrete portfolio choice consumer type
print('Now solving a discrete choice portfolio problem; this might take a minute...')
DiscreteType = PortfolioConsumerType(**init_discrete_share)
DiscreteType.cycles = 0
t0 = time()
DiscreteType.solve()
t1 = time()
DiscreteType.cFunc = [DiscreteType.solution[t].cFuncAdj for t in range(DiscreteType.T_cycle)]
DiscreteType.ShareFunc = [DiscreteType.solution[t].ShareFuncAdj for t in range(DiscreteType.T_cycle)]
print('Solving an infinite horizon discrete portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# Plot the consumption and risky-share functions
print('Consumption function over market resources:')
plotFuncs(DiscreteType.cFunc[0], 0., 50.)
print('Risky asset share as a function of market resources:')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
# Since we are using a discretization of the lognormal distribution,
# the limit is numerically computed and slightly different from
# the analytical limit obtained by Merton and Samuelson for infinite wealth
plotFuncs([DiscreteType.ShareFunc[0]
,lambda m: DiscreteType.ShareLimit*np.ones_like(m)
] , 0., 200.)
print('\n\n\n')
""
# Make another example type, but this one can only update their risky portfolio
# share in any particular period with 15% probability.
init_sticky_share = init_portfolio.copy()
init_sticky_share['AdjustPrb'] = 0.15
# Make and solve a discrete portfolio choice consumer type
print('Now solving a portfolio choice problem with "sticky" portfolio shares; this might take a moment...')
StickyType = PortfolioConsumerType(**init_sticky_share)
StickyType.cycles = 0
t0 = time()
StickyType.solve()
t1 = time()
StickyType.cFuncAdj = [StickyType.solution[t].cFuncAdj for t in range(StickyType.T_cycle)]
StickyType.cFuncFxd = [StickyType.solution[t].cFuncFxd for t in range(StickyType.T_cycle)]
StickyType.ShareFunc = [StickyType.solution[t].ShareFuncAdj for t in range(StickyType.T_cycle)]
print('Solving an infinite horizon sticky portfolio choice problem took ' + str(t1-t0) + ' seconds.')
# Plot the consumption and risky-share functions
print('Consumption function over market resources when the agent can adjust his portfolio:')
plotFuncs(StickyType.cFuncAdj[0], 0., 50.)
print("Consumption function over market resources when the agent CAN'T adjust, by current share:")
M = np.linspace(0., 50., 200)
for s in np.linspace(0.,1.,21):
C = StickyType.cFuncFxd[0](M, s*np.ones_like(M))
plt.plot(M,C)
plt.xlim(0.,50.)
plt.ylim(0.,None)
plt.show()
print('Risky asset share function over market resources (when possible to adjust):')
print('Optimal (blue) versus Theoretical Limit (orange)')
plt.xlabel('Normalized Market Resources')
plt.ylabel('Portfolio Share')
plt.ylim(0.0,1.0)
plotFuncs([StickyType.ShareFunc[0]
,lambda m: StickyType.ShareLimit*np.ones_like(m)
] , 0., 200.)
""
# Make another example type, but this one has *age-varying* perceptions of risky asset returns.
# Begin by making a lifecycle dictionary, but adjusted for the portfolio choice model.
init_age_varying_risk_perceptions = copy(init_lifecycle)
init_age_varying_risk_perceptions['RiskyCount'] = init_portfolio['RiskyCount']
init_age_varying_risk_perceptions['ShareCount'] = init_portfolio['ShareCount']
init_age_varying_risk_perceptions['aXtraMax'] = init_portfolio['aXtraMax']
init_age_varying_risk_perceptions['aXtraCount'] = init_portfolio['aXtraCount']
init_age_varying_risk_perceptions['aXtraNestFac'] = init_portfolio['aXtraNestFac']
init_age_varying_risk_perceptions['BoroCnstArt'] = init_portfolio['BoroCnstArt']
init_age_varying_risk_perceptions['CRRA'] = init_portfolio['CRRA']
init_age_varying_risk_perceptions['DiscFac'] = init_portfolio['DiscFac']
init_age_varying_risk_perceptions['RiskyAvg'] = 10*[1.08]
init_age_varying_risk_perceptions['RiskyStd'] = [0.20,0.21,0.22,0.23,0.24,0.25,0.26,0.27,0.28,0.29]
init_age_varying_risk_perceptions['RiskyAvgTrue'] = 1.08
init_age_varying_risk_perceptions['RiskyStdTrue'] = 0.20
AgeVaryingRiskPercType = PortfolioConsumerType(**init_age_varying_risk_perceptions)
AgeVaryingRiskPercType.cycles = 1
# Solve the agent type with age-varying risk perceptions
print('Now solving a portfolio choice problem with age-varying risk perceptions...')
t0 = time()
AgeVaryingRiskPercType.solve()
AgeVaryingRiskPercType.cFunc = [AgeVaryingRiskPercType.solution[t].cFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
AgeVaryingRiskPercType.ShareFunc = [AgeVaryingRiskPercType.solution[t].ShareFuncAdj for t in range(AgeVaryingRiskPercType.T_cycle)]
t1 = time()
print('Solving a ' + str(AgeVaryingRiskPercType.T_cycle) + ' period portfolio choice problem with age-varying risk perceptions took ' + str(t1-t0) + ' seconds.')
# Plot the consumption and risky-share functions
print('Consumption function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.cFunc, 0., 20.)
print('Risky asset share function over market resources in each lifecycle period:')
plotFuncs(AgeVaryingRiskPercType.ShareFunc, 0., 200.)
| {"hexsha": "635632d28443aa886e80df53f670653028161849", "size": 7915, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/ConsumptionSaving/example_ConsPortfolioModel.py", "max_stars_repo_name": "fangli-DX3906/HARK", "max_stars_repo_head_hexsha": "a55d06f4e47e9564b3bcc2250c8d8012cc758761", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/ConsumptionSaving/example_ConsPortfolioModel.py", "max_issues_repo_name": "fangli-DX3906/HARK", "max_issues_repo_head_hexsha": "a55d06f4e47e9564b3bcc2250c8d8012cc758761", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/ConsumptionSaving/example_ConsPortfolioModel.py", "max_forks_repo_name": "fangli-DX3906/HARK", "max_forks_repo_head_hexsha": "a55d06f4e47e9564b3bcc2250c8d8012cc758761", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.5588235294, "max_line_length": 161, "alphanum_fraction": 0.7748578648, "include": true, "reason": "import numpy", "num_tokens": 2111} |
"""
Copyright 2015, Ilya Kuzovkin
Copyright 2021-2022, Buran Consulting, LLC
Licensed under MIT
Builds on example code by Jack Keegan
https://batchloaf.wordpress.com/2014/01/17/real-time-analysis-of-data-from-biosemi-activetwo-via-tcpip-using-python/
"""
import logging
log = logging.getLogger(__name__)
import socket
import numpy as np
SPEED_MODE = {
0: 2048,
1: 4096,
2: 8192,
3: 16384,
4: 2048,
5: 4096,
6: 8192,
7: 16384,
}
def is_set(x, bit):
# Parentheses are not needed because of operator precedence rules, but help
# make the code more readable.
return bool((x & (1 << bit)) != 0)
def decode_trigger(x):
'''
Details
-------
Bit 00 (LSB) through 15:
Trigger input 1 through 16. Note that function keys F1 through F6 can embed
trigger bits in bits 8-15 if desired.
Bit 16 High when new Epoch is started
Bit 17 Speed bit 0
Bit 18 Speed bit 1
Bit 19 Speed bit 2
Bit 20 High when CMS is within range
Bit 21 Speed bit 3
Bit 22 High when battery is low
Bit 23 (MSB) High if ActiveTwo MK2
'''
speed_mode = \
(int(is_set(x, 17)) << 0) + \
(int(is_set(x, 18)) << 1) + \
(int(is_set(x, 19)) << 2) + \
(int(is_set(x, 21)) << 3)
trigger = 0b1111111111111111
return {
'trigger': int(x & trigger),
'cms_in_range': is_set(x, 20),
'low_battery': is_set(x, 22),
'ActiveMK2': is_set(x, 23),
'speed_mode': speed_mode,
'new_epoch': is_set(x, 16),
#'fs': SPEED_MODE[speed_mode],
}
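# For illustration only (hypothetical status word): a value of 0x00900003 has
# bits 0 and 1 set (trigger inputs 1 and 2), bit 20 set (CMS in range) and
# bit 23 set (ActiveTwo MK2), so decode_trigger reports trigger=3,
# cms_in_range=True, ActiveMK2=True and speed_mode=0.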
class ActiveTwoClient:
"""
Client for communicating with Biosemi ActiveTwo
"""
#: Host where ActiView acquisition software is running
#: This is the port ActiView listens on
#: Number of channles
#: Data packet size (default: 32 channels @ 512Hz)
def __init__(self, host='127.0.0.1', port=8888, eeg_channels=32,
ex_included=False, sensors_included=False,
jazz_included=False, aib_included=False,
trigger_included=False, socket_timeout=0.25,
fs=512, combine_eeg_exg=True):
"""
Initialize connection and parameters of the signal
Parameters
----------
host : string
IP address of ActiView server
port : int
Port number ActiView server is listening on
eeg_channels : float
Number of EEG channels included
combine_eeg_exg : bool
If true, the EEG and EXG channels are combined into a single 2D
array (with EXG stacked at the end).
"""
self.__dict__.update(locals())
# Calculate number of TCP samples in array.
if not (256 <= fs <= 16384):
raise ValueError('Invalid sampling rate supplied')
decimation_factor = 16384 / fs
if int(decimation_factor) != decimation_factor:
raise ValueError('Invalid sampling rate supplied')
self.tcp_samples = int(128 / decimation_factor)
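        # Worked example: with the default fs=512, decimation_factor is
        # 16384 / 512 = 32, so each TCP packet carries 128 / 32 = 4 samples
        # per channel.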
# Build a mapping of channel type to a Numpy slice that can be used to
# segment the data that we read in. I use a little trick to enable
# n_channel to track the "offset" as we build the slices. At the end,
# n_channels will tell us how many channels are being read in.
slices = {}
n_channels = 0
# Since the EXG channels come immediately after the EEG channels, we
# can easily treat them as a single set of channels that are merged.
if combine_eeg_exg and ex_included:
eeg_channels = eeg_channels + 8
ex_included = False
if eeg_channels != 0:
slices['eeg'] = np.s_[n_channels:eeg_channels]
n_channels += eeg_channels
if ex_included:
slices['ex'] = np.s_[n_channels:n_channels+8]
n_channels += 8
if sensors_included:
slices['sensors'] = np.s_[n_channels:n_channels+7]
n_channels += 7
if jazz_included:
slices['jazz'] = np.s_[n_channels:n_channels+9]
n_channels += 9
if aib_included:
slices['aib'] = np.s_[n_channels:n_channels+32]
n_channels += 32
if trigger_included:
slices['trigger'] = np.s_[-1]
n_channels += 1
self.slices = slices
self.n_channels = n_channels
self.buffer_size = self.n_channels * self.tcp_samples * 3
m = 'ActiveTwoClient configured with %d channels at %f Hz'
log.info(m, self.n_channels, self.fs)
log.info('Expecting %d samples/chan', self.tcp_samples)
self._scale = np.array([256**1, 256**2, 256**3])
self._read = self._read_scale
def _read_scale(self, samples):
# This implements the approach used in the Matlab TCP example
data = self.sock.recv(self.buffer_size)
data = np.frombuffer(data, dtype='uint8').reshape((self.tcp_samples, self.n_channels, 3))
return np.sum(data * self._scale, axis=-1).T.astype('int32')
    def _read_bitshift(self, samples):
        # Alternative to _read_scale (a tad slower in testing). The 3 bytes of
        # each 24-bit sample arrive LSB first and are shifted into the upper
        # bytes of a 32-bit word, matching the 256**1..256**3 scaling above.
        signal_buffer = np.zeros((self.n_channels, self.tcp_samples), dtype='uint32')
        data = self.sock.recv(self.buffer_size)
        for m in range(self.tcp_samples):
            # extract samples for each channel
            for channel in range(self.n_channels):
                offset = m * 3 * self.n_channels + (channel * 3)
                sample = \
                    (data[offset+2] << 24) + \
                    (data[offset+1] << 16) + \
                    (data[offset] << 8)
                # Store sample to signal buffer
                signal_buffer[channel, m] = sample
        return signal_buffer
def read(self, duration):
"""
Read signal from EEG
Parameters
----------
duration : float
Duration, in seconds, to read. If duration is too long, then it
seems the ActiView client will disconnect.
Returns
-------
signal : 2D array (channel x time)
Signal.
"""
total_samples = int(round(duration * self.fs))
# The reader process will run until requested amount of data is collected
samples = 0
data = []
while samples < total_samples:
try:
data.append(self._read(samples))
samples += self.tcp_samples
except Exception as e:
log.exception(e)
break
if data:
data = np.concatenate(data, axis=-1)
else:
data = np.empty((self.n_channels, 0), dtype='int32')
result = {}
for name, s in self.slices.items():
if name != 'trigger':
# Convert to microvolts
result[name] = data[s] * 31.25e-9 / 256
else:
result[name] = data[s]
return result
def connect(self):
# Open connection. Be sure to set a timeout to make sure that the
# program does not become unresponsive (on Windows even Ctrl+C can't
# break a socket that's hung waiting for data).
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.connect((self.host, self.port))
self.sock.settimeout(self.socket_timeout)
def disconnect(self):
# Important! Be sure this is called to properly shut down sockets.
self.sock.shutdown(socket.SHUT_RDWR)
self.sock.close()
| {"hexsha": "0c7fde2528e3c1a7c8cb65fa903f94126240dca7", "size": 8409, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyactivetwo/client.py", "max_stars_repo_name": "NCRAR/pyactivetwo", "max_stars_repo_head_hexsha": "01cbfb2f1fdfd7bdfa28fd656a7b6d5ea61ad0aa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyactivetwo/client.py", "max_issues_repo_name": "NCRAR/pyactivetwo", "max_issues_repo_head_hexsha": "01cbfb2f1fdfd7bdfa28fd656a7b6d5ea61ad0aa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pyactivetwo/client.py", "max_forks_repo_name": "NCRAR/pyactivetwo", "max_forks_repo_head_hexsha": "01cbfb2f1fdfd7bdfa28fd656a7b6d5ea61ad0aa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.2371541502, "max_line_length": 116, "alphanum_fraction": 0.5797359971, "include": true, "reason": "import numpy", "num_tokens": 2052} |
% \documentclass{article}
% \usepackage{graphicx}
% \usepackage[a4paper, margin=0.4in]{geometry}
% \usepackage{subcaption}
% \usepackage{printlen}
% \uselengthunit{cm}
% \newlength\imageheight
% \newlength\imagewidth
% \begin{document}
\section{Partial TRP TX5 MSP\_A RX5 Minipod Loopback}\label{sec:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}
\begin{figure}[h] % "[t!]" placement specifier just for this example
\centering
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX500RX500MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-00--RX5-00-MSP_A_FPGA.pdf}}
\end{subfigure}\hspace*{\fill}
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX501RX501MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-01--RX5-01-MSP_A_FPGA.pdf}}
\end{subfigure}
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX502RX502MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-02--RX5-02-MSP_A_FPGA.pdf}}
\end{subfigure}\hspace*{\fill}
\begin{subfigure}{0.4\textwidth}
\hyperref[sec:TRPFPGATX503RX503MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-03--RX5-03-MSP_A_FPGA.pdf}}
\end{subfigure}
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX504RX504MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-04--RX5-04-MSP_A_FPGA.pdf}}
\end{subfigure}\hspace*{\fill}
\begin{subfigure}{0.4\textwidth}
\hyperref[sec:TRPFPGATX505RX505MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-05--RX5-05-MSP_A_FPGA.pdf}}
\end{subfigure}
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX506RX506MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-06--RX5-06-MSP_A_FPGA.pdf}}
\end{subfigure}\hspace*{\fill}
\begin{subfigure}{0.5\textwidth}
\hyperref[sec:TRPFPGATX507RX507MSPAFPGA9.6-optimized]{\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-07--RX5-07-MSP_A_FPGA.pdf}}
\end{subfigure}
\caption{Partial TRP TX5 MSP\_A RX5 Minipod Loopback} \label{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}
\end{figure}
A cross-reference to Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:PartialTRPTX5MSPARX5MinipodLoopback6.4-optimized]{6.4-optimized}, \hyperref[sec:PartialTRPTX5MSPARX5MinipodLoopback12.8-optimized]{12.8-optimized}. \\
Next summary Figure~\ref{fig:TRPJ1QSFPLoopback9.6-optimized}.
\clearpage
% \end{document}
\subsection{TRP\_FPGA-TX5-00--RX5-00-MSP\_A\_FPGA}\label{sec:TRPFPGATX500RX500MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-00--RX5-00-MSP\_A\_FPGA}
\label{tab:TRPFPGATX500RX500MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTY & \multicolumn{2}{l|}{2018-Jan-24 19:35:48} & \multicolumn{2}{l|}{2018-Jan-24 19:36:17} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 7803 & 41 & 63.08\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0x4002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-00--RX5-00-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-00--RX5-00-MSP\_A\_FPGA} \label{fig:TRPFPGATX500RX500MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX500RX500MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX500RX500MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-01--RX5-01-MSP\_A\_FPGA}\label{sec:TRPFPGATX501RX501MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-01--RX5-01-MSP\_A\_FPGA}
\label{tab:TRPFPGATX501RX501MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTY & \multicolumn{2}{l|}{2018-Jan-24 19:34:50} & \multicolumn{2}{l|}{2018-Jan-24 19:35:19} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 6996 & 37 & 56.92\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0x4002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-01--RX5-01-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-01--RX5-01-MSP\_A\_FPGA} \label{fig:TRPFPGATX501RX501MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX501RX501MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX501RX501MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-02--RX5-02-MSP\_A\_FPGA}\label{sec:TRPFPGATX502RX502MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-02--RX5-02-MSP\_A\_FPGA}
\label{tab:TRPFPGATX502RX502MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTH & \multicolumn{2}{l|}{2018-Jan-24 19:36:47} & \multicolumn{2}{l|}{2018-Jan-24 19:37:16} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 9629 & 44 & 67.69\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0xC002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-02--RX5-02-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-02--RX5-02-MSP\_A\_FPGA} \label{fig:TRPFPGATX502RX502MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX502RX502MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX502RX502MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-03--RX5-03-MSP\_A\_FPGA}\label{sec:TRPFPGATX503RX503MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-03--RX5-03-MSP\_A\_FPGA}
\label{tab:TRPFPGATX503RX503MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTY & \multicolumn{2}{l|}{2018-Jan-24 19:35:19} & \multicolumn{2}{l|}{2018-Jan-24 19:35:47} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 7247 & 37 & 56.92\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0x4002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-03--RX5-03-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-03--RX5-03-MSP\_A\_FPGA} \label{fig:TRPFPGATX503RX503MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX503RX503MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX503RX503MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-04--RX5-04-MSP\_A\_FPGA}\label{sec:TRPFPGATX504RX504MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-04--RX5-04-MSP\_A\_FPGA}
\label{tab:TRPFPGATX504RX504MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTH & \multicolumn{2}{l|}{2018-Jan-24 19:37:46} & \multicolumn{2}{l|}{2018-Jan-24 19:38:14} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 8886 & 44 & 67.69\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0xC002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-04--RX5-04-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-04--RX5-04-MSP\_A\_FPGA} \label{fig:TRPFPGATX504RX504MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX504RX504MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX504RX504MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-05--RX5-05-MSP\_A\_FPGA}\label{sec:TRPFPGATX505RX505MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-05--RX5-05-MSP\_A\_FPGA}
\label{tab:TRPFPGATX505RX505MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTY & \multicolumn{2}{l|}{2018-Jan-24 19:34:20} & \multicolumn{2}{l|}{2018-Jan-24 19:34:49} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 7050 & 36 & 55.38\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0x4002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-05--RX5-05-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-05--RX5-05-MSP\_A\_FPGA} \label{fig:TRPFPGATX505RX505MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX505RX505MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX505RX505MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-06--RX5-06-MSP\_A\_FPGA}\label{sec:TRPFPGATX506RX506MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-06--RX5-06-MSP\_A\_FPGA}
\label{tab:TRPFPGATX506RX506MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTH & \multicolumn{2}{l|}{2018-Jan-24 19:36:17} & \multicolumn{2}{l|}{2018-Jan-24 19:36:47} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 9514 & 43 & 66.15\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0xC002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-06--RX5-06-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-06--RX5-06-MSP\_A\_FPGA} \label{fig:TRPFPGATX506RX506MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX506RX506MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX506RX506MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
\subsection{TRP\_FPGA-TX5-07--RX5-07-MSP\_A\_FPGA}\label{sec:TRPFPGATX507RX507MSPAFPGA9.6-optimized}
% Please add the following required packages to your document preamble:
% \usepackage{booktabs}
\begin{table}[h]
\centering
\caption{TRP\_FPGA-TX5-07--RX5-07-MSP\_A\_FPGA}
\label{tab:TRPFPGATX507RX507MSPAFPGA9.6-optimized}
\begin{tabular}{@{}|l|l|l|l|l|l|@{}}
\toprule
\textbf{SW Version} & \textbf{GT Type} & \multicolumn{2}{l|}{\textbf{Date and Time Started}} & \multicolumn{2}{l|}{\textbf{Date and Time Ended}} \\ \midrule
2017.2 & UltraScale GTH & \multicolumn{2}{l|}{2018-Jan-24 19:37:16} & \multicolumn{2}{l|}{2018-Jan-24 19:37:46} \\ \midrule
\textbf{Reset RX} & \textbf{OA} & \textbf{HO} & \textbf{HO (\%)} & \textbf{VO} & \textbf{VO (\%)} \\ \midrule
true & 8915 & 43 & 66.15\% & 255 & 100.00\% \\ \midrule
\textbf{Dwell Type} & \textbf{Dwell BER} & \textbf{Horizontal Increment} & \textbf{Vertical Increment} & \multicolumn{2}{l|}{\textbf{Misc Info}} \\ \midrule
BER & 1e-7 & 1 & 1 & \multicolumn{2}{l|}{ELF Version: 0xC002 SVN: 0} \\ \bottomrule
\end{tabular}
\end{table}
\begin{figure}[h]
\includegraphicsmaybe{../scans/pdf/9.6-optimized/TRP_FPGA-TX5-07--RX5-07-MSP_A_FPGA.pdf}
\caption{TRP\_FPGA-TX5-07--RX5-07-MSP\_A\_FPGA} \label{fig:TRPFPGATX507RX507MSPAFPGA9.6-optimized}
\end{figure}
Call back to summary Figure~\ref{fig:PartialTRPTX5MSPARX5MinipodLoopback9.6-optimized}.
Sibling eye diagrams: \hyperref[sec:TRPFPGATX507RX507MSPAFPGA6.4-optimized]{6.4-optimized}, \hyperref[sec:TRPFPGATX507RX507MSPAFPGA12.8-optimized]{12.8-optimized}.
\clearpage
\newpage
| {"hexsha": "5b93f6702cc07b614d7ad20de95e4dccc81c0744", "size": 17287, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "out/tex/Partial_TRP_TX5_MSP_A_RX5_Minipod_Loopback_9.6-optimized.tex", "max_stars_repo_name": "mvsoliveira/IBERTpy", "max_stars_repo_head_hexsha": "7d702ed87f0c8fbe90f4ef0445e2d4f77a79ec02", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2019-04-22T14:22:42.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-04T17:49:39.000Z", "max_issues_repo_path": "out/tex/Partial_TRP_TX5_MSP_A_RX5_Minipod_Loopback_9.6-optimized.tex", "max_issues_repo_name": "mvsoliveira/IBERTpy", "max_issues_repo_head_hexsha": "7d702ed87f0c8fbe90f4ef0445e2d4f77a79ec02", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "out/tex/Partial_TRP_TX5_MSP_A_RX5_Minipod_Loopback_9.6-optimized.tex", "max_forks_repo_name": "mvsoliveira/IBERTpy", "max_forks_repo_head_hexsha": "7d702ed87f0c8fbe90f4ef0445e2d4f77a79ec02", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-05-16T03:47:42.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-04T21:03:53.000Z", "avg_line_length": 57.6233333333, "max_line_length": 191, "alphanum_fraction": 0.6421588477, "num_tokens": 6520} |
import tensorflow as tf
import numpy as np
crf_eval_module = tf.load_op_library('/home/wfx/parsing/tensorflow_op_proj_uas/CRFEval.so')
def crf_cost(arc_probs,rel_probs, targets, sen_len):
out1 = crf_eval_module.cost_out(arc_probs,rel_probs,targets,sen_len)
return tf.stop_gradient(out1)
def crf_decode(arc_probs,rel_probs, sen_len):
arc_r,rel_r = crf_eval_module.decode_out(arc_probs,rel_probs,sen_len)
return tf.stop_gradient(arc_r),tf.stop_gradient(rel_r)
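# Note: tf.stop_gradient marks the custom-op outputs as constants for
# backpropagation, so TensorFlow does not try to differentiate through the
# externally loaded CRF kernels when gradients are taken elsewhere in the graph.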
if True:
rnidx = 1
x1 = np.random.uniform(-10,0, size=(2,3,3)).astype(np.float32)
x2 = np.random.uniform(-10,0, size=(2,3,3,7)).astype(np.float32)
len1 = np.random.randint(1,6,size = [2]).astype(np.int32)
len1[0]=3
len1[1]=3
gold_conll = np.random.randint(0,3,size = [2,3,5]).astype(np.int32)
tf_x1 = tf.get_variable('Weights1_%d' % rnidx, [2, 3,3], initializer=tf.constant_initializer(x1))
tf_x2 = tf.get_variable('Weights2_%d' % rnidx, [2, 3,3,7], initializer=tf.constant_initializer(x2))
tf_len1 = tf.get_variable('len1_%d'% rnidx, [2],dtype=tf.int32, initializer=tf.constant_initializer(len1))
tf_gold_conll = tf.get_variable('gold_conll_%d' % rnidx, [2, 3,5],dtype=tf.int32, initializer=tf.constant_initializer(gold_conll))
#tf_w = tf.constant(w_prob)
#tf_arc = tf.expand_dims(tf_x1,3)
#tf_res = (tf_arc+tf_x2)
out1 = crf_cost(tf_x1,tf_x2,tf_gold_conll,tf_len1) #- tf_res
cost = tf.reduce_sum(tf.reshape(out1,[-1]))
tf_pred,tf_rel=crf_decode(tf_x1,tf_x2,tf_len1)
with tf.Session('') as sess:
        sess.run(tf.global_variables_initializer())  # initialize_all_variables() is deprecated in TF 1.x
ar,o1,pred,rel=sess.run([cost,out1,tf_pred,tf_rel])
print("cost=",ar)
print(o1)
print(pred)
print(rel)
| {"hexsha": "7c60ff818550298ab106574a511d07b8cbd69f27", "size": 1723, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_op_proj_uas/test.py", "max_stars_repo_name": "wfxedu/CRFBiaffineParser", "max_stars_repo_head_hexsha": "763368fb8890877694caf0281805da5351d57ef0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2017-08-27T18:26:21.000Z", "max_stars_repo_stars_event_max_datetime": "2017-09-01T01:41:15.000Z", "max_issues_repo_path": "tensorflow_op_proj_uas/test.py", "max_issues_repo_name": "wfxedu/CRFBiaffineParser", "max_issues_repo_head_hexsha": "763368fb8890877694caf0281805da5351d57ef0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_op_proj_uas/test.py", "max_forks_repo_name": "wfxedu/CRFBiaffineParser", "max_forks_repo_head_hexsha": "763368fb8890877694caf0281805da5351d57ef0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.2888888889, "max_line_length": 133, "alphanum_fraction": 0.7040046431, "include": true, "reason": "import numpy", "num_tokens": 562} |
# export fit_cubic_spline,
# sample_spline,
# sample_spline_derivative,
# sample_spline_derivative2,
# sample_spline_speed,
# sample_spline_theta,
# sample_spline_curvature,
# sample_spline_derivative_of_curvature,
# calc_curve_length,
# arclength,
# calc_curve_param_given_arclen
function _integrate_simpsons(f::Function, a::Real, b::Real, n::Int)
# integrate using Composite Simpson's rule
# reference: https://en.wikipedia.org/wiki/Simpson%27s_rule
@assert(n > 0) # number of intervals
@assert(mod(n,2) == 0) # n must be even
h = (b-a)/n
retval = f(a) + f(b)
flip = true
for i = 1 : n-1
retval += f(a+i*h) * (flip ? 4 : 2)
flip = !flip
end
return h/3*retval
end
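# Sanity-check sketch (not part of the exported API): composite Simpson's rule
# is exact for cubics and converges quickly for smooth integrands, e.g.
#     _integrate_simpsons(sin, 0.0, pi, 100)       # ≈ 2.0   (∫₀^π sin t dt)
#     _integrate_simpsons(t -> t^2, 0.0, 1.0, 10)  # = 1/3 up to rounding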
function _fit_open(pts::AbstractVector{Float64} )
# fits the 1-D spline such that:
# spline goes through each point
# first and second derivatives match at each inner point
# the second derivative at the ends is zero
# see: http://mathworld.wolfram.com/CubicSpline.html
# this function returns a 4×(n-1) spline coefficient matrix, where n = |pts|
n = length(pts)-1
@assert(n > 0)
M = spzeros(n+1,n+1)
for i = 1 : n
M[i,i] = 4
M[i,i+1] = 1
M[i+1,i] = 1
end
M[n+1,n+1] = 2
M[1,1] = 2
Y = Array{Float64}(n+1)
for i = 1 : n+1
        ind_hi = min(i+1,n+1) # clamp to the last point (n+1); clamping to n dropped y_{n+1} from the end conditions
ind_lo = max(1,i-1)
Y[i] = 3*(pts[ind_hi] - pts[ind_lo])
end
D = M\Y
spline_coeffs = Array{Float64}(4, n) # col is <a,b,c,d>
spline_coeffs[1,:] = pts[1:n]
spline_coeffs[2,:] = D[1:n]
spline_coeffs[3,:] = 3*(pts[2:n+1] - pts[1:n]) -2*D[1:n]-D[2:n+1]
spline_coeffs[4,:] = 2*(pts[1:n] - pts[2:n+1]) + D[1:n] + D[2:n+1]
spline_coeffs
end
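# For reference, the linear system assembled above is the natural-spline system
# (zero second derivative at both ends) from the MathWorld page cited above:
#     D_{i-1} + 4 D_i + D_{i+1} = 3 (y_{i+1} - y_{i-1})   at interior points,
#     2 D_1 + D_2               = 3 (y_2 - y_1)           at the left end,
#     D_n + 2 D_{n+1}           = 3 (y_{n+1} - y_n)       at the right end,
# where D_i is the spline derivative at point i.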
function _fit_closed(pts::AbstractVector{Float64} )
# fits the 1-D spline such that:
# spline goes through each point
# first and second derivatives match at each inner point
    #    first and second derivatives at the ends match (closed / periodic end conditions)
# see: http://mathworld.wolfram.com/CubicSpline.html
# this function returns a 4×n spline coefficient matrix, where n = |pts|
n = length(pts)-1
@assert(n > 0)
M = spzeros(n+1,n+1)
for i = 1 : n
M[i,i] = 4
M[i,i+1] = 1
M[i+1,i] = 1
end
M[n+1,n+1] = 4
M[1,n+1] = 1
M[n+1,1] = 1
Y = Array{Float64}(n+1)
Y[1] = 3*(pts[2] - pts[n+1])
for i = 2 : n
Y[i] = 3*(pts[i+1] - pts[i-1])
end
Y[end] = 3*(pts[1] - pts[n])
D = M\Y
spline_coeffs = Array{Float64}(4, n+1) # col is <a,b,c,d>
spline_coeffs[1,:] = pts
spline_coeffs[2,:] = D
spline_coeffs[3,1:n] = 3*(pts[2:n+1] - pts[1:n]) -2*D[1:n]-D[2:n+1]
spline_coeffs[4,1:n] = 2*(pts[1:n] - pts[2:n+1]) + D[1:n] + D[2:n+1]
spline_coeffs[3,n+1] = 3*(pts[1] - pts[n+1]) -2*D[n+1]-D[1]
spline_coeffs[4,n+1] = 2*(pts[n+1] - pts[1]) + D[n+1] + D[1]
spline_coeffs
end
function _fit_open(pts::Matrix{Float64}) # 2×n {x,y}
# see http://mathworld.wolfram.com/CubicSpline.html
d,n = size(pts)
n -= 1
Y = Array{Float64}(n+1)
M = sparse(Int[], Int[], Float64[], n+1,n+1)
for i in 1 : n
M[i,i] = 4.0
M[i,i+1] = 1.0
M[i+1,i] = 1.0
end
M[n+1,n+1] = 2.0
M[1,1] = 2.0
retval = Array{Matrix{Float64}}(d)
for k in 1 : d
for i in 1 : n+1
            ind_hi = min(i+1,n+1) # clamp to the last point (n+1), matching the 1-D fit above
ind_lo = max(1,i-1)
Y[i] = 3*(pts[k,ind_hi] - pts[k,ind_lo])
end
D = M \ Y
spline_coeffs = Array{Float64}(4, n) # col is <a,b,c,d> for a + b⋅t + c⋅t² + d⋅t³
spline_coeffs[1,:] = pts[k,1:n] # x₀
spline_coeffs[2,:] = D[1:n] # x'₀
spline_coeffs[3,:] = 3*(pts[k,2:n+1]' - pts[k,1:n]') -2*D[1:n] - D[2:n+1] # -3x₀ + 3x₁ - 2x'₀ - x'₁
spline_coeffs[4,:] = 2*(pts[k,1:n]' - pts[k,2:n+1]') + D[1:n] + D[2:n+1] # 2x₀ - 2x₁ + x'₀ + x'₁
retval[k] = spline_coeffs
end
retval
end
function _fit_closed(pts::AbstractMatrix{Float64})
d = size(pts,1)
retval = Array{Matrix{Float64}}(d)
for i = 1 : d
retval[i] = _fit_closed(vec(pts[i,:]))
end
retval
end
function fit_cubic_spline(pts::AbstractArray{Float64}; open::Bool=true)
if open
return _fit_open(pts)
else
return _fit_closed(pts)
end
end
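# Usage sketch (assumes the 2×n point layout used by the matrix methods above;
# `linspace` matches the Julia version this file is written for):
#     pts = [0.0 1.0 2.0 3.0;          # x coordinates
#            0.0 1.0 0.0 1.0]          # y coordinates
#     cx, cy = fit_cubic_spline(pts)   # one 4×3 coefficient matrix per dimension
#     ts = collect(linspace(0.0, 3.0, 50))  # segment-indexed parameter, one unit per segment
#     xs = sample_spline(cx, ts)
#     ys = sample_spline(cy, ts)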
function sample_spline(spline_coeffs::AbstractVector{Float64}, t::Float64)
# here t is generally expected to be t ∈ [0,1]
return spline_coeffs[1] + t*(spline_coeffs[2] + t*(spline_coeffs[3] + t*spline_coeffs[4]))
end
function sample_spline(spline_coeffs::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
sample_spline(spline_coeffs[:,col_ind], t-col_ind+1)
end
function sample_spline(spline_coeffs::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
a = spline_coeffs[1]
b = spline_coeffs[2]
c = spline_coeffs[3]
d = spline_coeffs[4]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = a + t*(b + t*(c + t*d))
end
retval
end
function sample_spline(spline_coeffs::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
retval[i] = sample_spline(spline_coeffs[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_derivative(spline_coeffs::AbstractVector{Float64}, t::Float64)
# here t is generally expected to be t ∈ [0,1]
return spline_coeffs[2] + t*(2spline_coeffs[3] + t*3spline_coeffs[4])
end
function sample_spline_derivative(spline_coeffs::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
sample_spline_derivative(spline_coeffs[:,col_ind], t-col_ind+1)
end
function sample_spline_derivative(spline_coeffs::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
b = spline_coeffs[2]
c = spline_coeffs[3]
d = spline_coeffs[4]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = b + t*(2c + t*3d)
end
retval
end
function sample_spline_derivative(spline_coeffs::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
retval[i] = sample_spline_derivative(spline_coeffs[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_derivative2(spline_coeffs::AbstractVector{Float64}, t::Float64)
# here t is generally expected to be t ∈ [0,1]
return 2spline_coeffs[3] + t*6spline_coeffs[4]
end
function sample_spline_derivative2(spline_coeffs::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
sample_spline_derivative2(spline_coeffs[:,col_ind], t-col_ind+1)
end
function sample_spline_derivative2(spline_coeffs::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
b = spline_coeffs[2]
c = spline_coeffs[3]
d = spline_coeffs[4]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = 2c + t*6d
end
retval
end
function sample_spline_derivative2(spline_coeffs::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
@assert(size(spline_coeffs, 1) == 4)
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, size(spline_coeffs,2))
retval[i] = sample_spline_derivative2(spline_coeffs[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_speed(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t::Float64)
dxdt = sample_spline_derivative(spline_coeffs_x, t)
dydt = sample_spline_derivative(spline_coeffs_y, t)
hypot(dxdt, dydt)
end
function sample_spline_speed(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
col_ind = clamp(ceil(Int, t), 1, n)::Int
sample_spline_speed(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
function sample_spline_speed(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
bx = spline_coeffs_x[2]
cx = spline_coeffs_x[3]
dx = spline_coeffs_x[4]
by = spline_coeffs_y[2]
cy = spline_coeffs_y[3]
dy = spline_coeffs_y[4]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
dxdt = bx + t*(2cx + t*3dx)
dydt = by + t*(2cy + t*3dy)
retval[i] = hypot(dxdt, dydt)
end
retval
end
function sample_spline_speed(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, n)
retval[i] = sample_spline_speed(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_theta(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t::Float64;
stepsize=1e-4
)
# compute the angle from positive x-axis (counter-clockwise positive) of the curve in the positive t direction at t
# uses an approximation via small step size instead of derivative due to zero-derivative issues
# uses the forward derivative approximation unless it would put it out of range
# result returned is in radians
t_lo, t_hi = t, t+stepsize
if t_hi > 1.0
t_lo, t_hi = t-min(1000stepsize,0.1), t
end
x1 = sample_spline(spline_coeffs_x, t_lo)
x2 = sample_spline(spline_coeffs_x, t_hi)
y1 = sample_spline(spline_coeffs_y, t_lo)
y2 = sample_spline(spline_coeffs_y, t_hi)
# println("(t, lo, hi) $t $t_lo $t_hi, ($(atan2(y2-y1, x2-x1)))")
atan2(y2-y1, x2-x1)
end
function sample_spline_theta(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
col_ind = clamp(ceil(Int, t), 1, n)
sample_spline_theta(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
function sample_spline_theta(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = sample_spline_theta(spline_coeffs_x, spline_coeffs_y, t)
end
retval
end
function sample_spline_theta(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, n)
retval[i] = sample_spline_theta(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_curvature(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t::Float64)
# computes the signed curvature
dx = sample_spline_derivative( spline_coeffs_x, t)
dy = sample_spline_derivative( spline_coeffs_y, t)
ddx = sample_spline_derivative2(spline_coeffs_x, t)
ddy = sample_spline_derivative2(spline_coeffs_y, t)
(dx*ddy - dy*ddx)/(dx*dx + dy*dy)^1.5
end
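# This is the standard signed-curvature formula for a parametric plane curve:
#     κ(t) = (x′ y″ − y′ x″) / (x′² + y′²)^(3/2)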
function sample_spline_curvature(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t::Float64)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
col_ind = clamp(ceil(Int, t), 1, n)
sample_spline_curvature(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
function sample_spline_curvature(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t_arr::AbstractVector{Float64})
# here t is generally expected to be t ∈ [0,1]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = sample_spline_curvature(spline_coeffs_x, spline_coeffs_y, t)
end
retval
end
function sample_spline_curvature(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64})
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, n)
retval[i] = sample_spline_curvature(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1)
end
retval
end
function sample_spline_derivative_of_curvature(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t::Float64;
stepsize=1e-4
)
# computes the derivative of the signed curvature
t_lo, t_hi = t, t+stepsize
if t_hi > 1.0
t_lo, t_hi = t-stepsize, t
end
κ_hi = sample_spline_curvature(spline_coeffs_x, spline_coeffs_y, t_hi)
κ_lo = sample_spline_curvature(spline_coeffs_x, spline_coeffs_y, t_lo)
(κ_hi - κ_lo) / stepsize
end
function sample_spline_derivative_of_curvature(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t::Float64;
stepsize=1e-4
)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
col_ind = clamp(ceil(Int, t), 1, n)
sample_spline_derivative_of_curvature(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1, stepsize=stepsize)
end
function sample_spline_derivative_of_curvature(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64}, t_arr::AbstractVector{Float64};
stepsize=1e-4
)
# here t is generally expected to be t ∈ [0,1]
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
retval[i] = sample_spline_derivative_of_curvature(spline_coeffs_x, spline_coeffs_y, t, stepsize=stepsize)
end
retval
end
function sample_spline_derivative_of_curvature(spline_coeffs_x::AbstractMatrix{Float64}, spline_coeffs_y::AbstractMatrix{Float64}, t_arr::AbstractVector{Float64};
stepsize=1e-4
)
# for t ∈ (-∞,1] we use spline_coeffs[:,1]
# for t ∈ [1,2] we use spline_coeffs[:,2]
# etc.
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x, 1) == 4)
@assert(size(spline_coeffs_y, 1) == 4)
@assert(n == size(spline_coeffs_y, 2))
retval = Array{Float64}(length(t_arr))
for (i,t) in enumerate(t_arr)
col_ind = clamp(ceil(Int, t), 1, n)
retval[i] = sample_spline_derivative_of_curvature(spline_coeffs_x[:,col_ind], spline_coeffs_y[:,col_ind], t-col_ind+1, stepsize=stepsize)
end
retval
end
function calc_curve_length(spline_coeffs_x::AbstractVector{Float64}, spline_coeffs_y::AbstractVector{Float64};
n_intervals::Int = 100
)
# integrate using Simpson's rule
# _integrate_simpsons(t->sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t), 0.0, 1.0, n_intervals)
a = 0.0
b = 1.0
n = n_intervals
h = (b-a)/n
retval = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, a) + sample_spline_speed(spline_coeffs_x, spline_coeffs_y, b)
flip = true
for i = 1 : n-1
retval += sample_spline_speed(spline_coeffs_x, spline_coeffs_y, a+i*h) * (flip ? 4 : 2)
flip = !flip
end
return h/3*retval
end
function calc_curve_length(
spline_coeffs_x::AbstractMatrix{Float64},
spline_coeffs_y::AbstractMatrix{Float64};
n_intervals_per_segment::Int = 100
)
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_y, 2) == n)
@assert(size(spline_coeffs_x, 1) == size(spline_coeffs_y, 1) == 4)
len = 0.0
for i = 1 : n
len += calc_curve_length(spline_coeffs_x[:,i], spline_coeffs_y[:,i], n_intervals = n_intervals_per_segment)
end
len
end
function arclength(
spline_coeffs_x::AbstractVector{Float64},
spline_coeffs_y::AbstractVector{Float64},
t_min::Real = 0.0,
t_max::Real = 1.0,
n_intervals::Int = 100
)
if isapprox(t_min, t_max)
return 0.0
end
# _integrate_simpsons(t->sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t), t_min, t_max, n_intervals)
a = t_min
b = t_max
n = n_intervals
h = (b-a)/n
retval = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, a) + sample_spline_speed(spline_coeffs_x, spline_coeffs_y, b)
flip = true
for i = 1 : n-1
retval += sample_spline_speed(spline_coeffs_x, spline_coeffs_y, a+i*h) * (flip ? 4 : 2)
flip = !flip
end
return h/3*retval
end
function arclength(
spline_coeffs_x::AbstractMatrix{Float64},
spline_coeffs_y::AbstractMatrix{Float64},
t_min::Real = 0.0,
t_max::Real = size(spline_coeffs_x, 2),
n_intervals_per_segment::Int = 100
)
n = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_y, 2) == n)
@assert(size(spline_coeffs_x, 1) == size(spline_coeffs_y, 1) == 4)
if isapprox(t_min, t_max)
return 0.0
end
# println("tmin/tmax: $t_min / $t_max")
len = 0.0
for i = floor(Int, t_min) : min(floor(Int, t_max), n-1)
t_lo, t_hi = float(i), i+1.0
spline_ind = i+1
t_in_min = max(t_lo, t_min) - t_lo
t_in_max = min(t_hi, t_max) - t_lo
# println("($i) t_lo: $t_lo, t_hi: $t_hi, : $t_in_min → $t_in_max")
len += arclength(spline_coeffs_x[:,spline_ind], spline_coeffs_y[:,spline_ind], t_in_min, t_in_max, n_intervals_per_segment)
end
# println("len: ", len)
len
end
function calc_curve_param_given_arclen(
spline_coeffs_x :: AbstractVector{Float64},
spline_coeffs_y :: AbstractVector{Float64},
s :: Float64;
max_iterations :: Int=100,
curve_length :: Float64 = calc_curve_length(spline_coeffs_x, spline_coeffs_y),
epsilon::Float64 = 1e-4 # tolerance required before termination
)
# finds t such that p(t) is a distance s from start of curve
# returns t=0 if s ≤ 0.0 and t=1 if s > L
if s ≤ 0.0
return 0.0
elseif s ≥ curve_length
return 1.0
end
t = s/curve_length
lo, hi = 0.0, 1.0
# @printf("%10s %10s %10s %10s %10s %10s\n", "iter", "lo", "hi", "t", "s", "F")
# println("-"^65)
for iter = 1 : max_iterations
F = arclength(spline_coeffs_x, spline_coeffs_y, 0.0, t) - s
# @printf("%10d %10.5f %10.5f %10.5f %10.5f %10.5f\n", iter-1, lo, hi, t, s, F)
if abs(F) < epsilon
# |F(t)| is close enough to zero, report it
return t
end
DF = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t)
tCandidate = t - F/DF
if F > 0
hi = t
t = tCandidate ≤ lo ? 0.5*(lo+hi) : tCandidate
else
lo = t
t = tCandidate ≥ hi ? 0.5*(lo+hi) : tCandidate
end
end
# @printf("%10d %10.5f %10.5f %10.5f %10.5f %10s\n", max_iterations, lo, hi, t, s, "-")
t
end
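# The loop above is a Newton iteration on F(t) = arclength(0, t) - s with a
# bisection safeguard: [lo, hi] always brackets the root, and whenever the
# Newton step t - F/F′ would leave the bracket the midpoint is used instead.
# F′(t) is exactly the spline speed, which is why sample_spline_speed supplies
# the derivative.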
function calc_curve_param_given_arclen(
spline_coeffs_x :: AbstractMatrix{Float64},
spline_coeffs_y :: AbstractMatrix{Float64},
s :: Float64;
max_iterations :: Int=100,
n_integration_intervals :: Int=100, # must be multiple of 2
curve_length :: Float64 = calc_curve_length(spline_coeffs_x, spline_coeffs_y),
epsilon::Float64 = 1e-4 # tolerance required before termination
)
# finds t such that p(t) is a distance s from start of curve
# returns t=0 if s ≤ 0.0 and t=t_max if s > L
n_segments = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x,1) == size(spline_coeffs_y,1) == 4)
@assert(size(spline_coeffs_y,2) == n_segments)
if s ≤ 0.0
return 0.0
elseif s ≥ curve_length
return float(n_segments)
end
t = s/curve_length
lo, hi = 0.0, float(n_segments)
# @printf("%10s %10s %10s %10s %10s %10s\n", "iter", "lo", "hi", "t", "s", "F")
# println("-"^65)
for iter = 1 : max_iterations
F = arclength(spline_coeffs_x, spline_coeffs_y, 0.0, t, n_integration_intervals) - s
# @printf("%10d %10.5f %10.5f %10.5f %10.5f %10.5f\n", iter-1, lo, hi, t, s, F)
if abs(F) < epsilon
return t
end
DF = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t)
tCandidate = t - F/DF
if F > 0
hi = t
t = tCandidate ≤ lo ? 0.5*(lo+hi) : tCandidate
else
lo = t
t = tCandidate ≥ hi ? 0.5*(lo+hi) : tCandidate
end
end
# @printf("%10d %10.5f %10.5f %10.5f %10.5f %10s\n", max_iterations, lo, hi, t, s, "-")
t
end
function calc_curve_param_given_arclen(
spline_coeffs_x :: AbstractVector{Float64},
spline_coeffs_y :: AbstractVector{Float64},
s_arr :: AbstractVector{Float64}; # assumes s_arr is sorted
max_iterations :: Int=100,
curve_length :: Float64 = calc_curve_length(spline_coeffs_x, spline_coeffs_y),
epsilon::Float64 = 1e-4 # tolerance required before termination
)
n = length(s_arr)
t_arr = Array{Float64}(n)
s = s_arr[1]
t = s/curve_length
if s ≤ 0.0
t = 0.0
elseif s ≥ curve_length
t = 1.0
end
lo = 0.0
for (i,s) in enumerate(s_arr)
if s ≤ 0.0
t = 0.0
t_arr[i], lo = t, t
continue
elseif s ≥ curve_length
t = 1.0
t_arr[i], lo = t, t
continue
end
hi = 1.0
for iter = 1 : max_iterations
F = arclength(spline_coeffs_x, spline_coeffs_y, 0.0, t) - s
            if abs(F) < epsilon
                break # converged; the assignment after the loop records t
            end
DF = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t)
tCandidate = t - F/DF
if F > 0
hi = t
t = tCandidate ≤ lo ? 0.5*(lo+hi) : tCandidate
else
lo = t
t = tCandidate ≥ hi ? 0.5*(lo+hi) : tCandidate
end
end
t_arr[i], lo = t, t
end
t_arr
end
function calc_curve_param_given_arclen(
spline_coeffs_x :: AbstractMatrix{Float64},
spline_coeffs_y :: AbstractMatrix{Float64},
s_arr :: AbstractVector{Float64}; # assumes s_arr is sorted
max_iterations :: Int = 50,
curve_length :: Float64 = calc_curve_length(spline_coeffs_x, spline_coeffs_y),
epsilon::Float64 = 1e-4, # tolerance required before termination
n_intervals_in_arclen::Int = 100
)
n_segments = size(spline_coeffs_x, 2)
@assert(size(spline_coeffs_x,1) == size(spline_coeffs_y,1) == 4)
@assert(size(spline_coeffs_y,2) == n_segments)
n = length(s_arr)
t_arr = Array{Float64}(n)
s = s_arr[1]
t = s/curve_length
if s ≤ 0.0
t = 0.0
elseif s ≥ curve_length
        t = float(n_segments)
end
lo = 0.0
# println("L: ", curve_length)
# println("s_max: ", s_arr[end])
for (i,s) in enumerate(s_arr)
# println("\ns: ", s)
if s ≤ 0.0
t = 0.0
t_arr[i] = lo = t
continue
elseif s ≥ curve_length
t = float(n_segments)
t_arr[i] = lo = t
continue
end
hi = float(n_segments)
for iter = 1 : max_iterations
F = arclength(spline_coeffs_x, spline_coeffs_y, 0.0, t, n_intervals_in_arclen) - s
if abs(F) < epsilon
break
end
DF = sample_spline_speed(spline_coeffs_x, spline_coeffs_y, t)
tCandidate = t - F/DF
if F > 0
hi = t
t = tCandidate ≤ lo ? 0.5*(lo+hi) : tCandidate
else
lo = t
t = tCandidate ≥ hi ? 0.5*(lo+hi) : tCandidate
end
end
t_arr[i] = lo = t
end
t_arr
end | {"hexsha": "1f8b1d3c4cb01d8adf731050bdd4d93e9db7e079", "size": 26712, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/core/splines.jl", "max_stars_repo_name": "Sawato/AutomotiveDrivingModels.jl", "max_stars_repo_head_hexsha": "3fbf6a4a53ebedd710d3dfd9f8440ce87b0573d5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/core/splines.jl", "max_issues_repo_name": "Sawato/AutomotiveDrivingModels.jl", "max_issues_repo_head_hexsha": "3fbf6a4a53ebedd710d3dfd9f8440ce87b0573d5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/core/splines.jl", "max_forks_repo_name": "Sawato/AutomotiveDrivingModels.jl", "max_forks_repo_head_hexsha": "3fbf6a4a53ebedd710d3dfd9f8440ce87b0573d5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4174757282, "max_line_length": 162, "alphanum_fraction": 0.6273959269, "num_tokens": 8629} |
import re
import numpy as np
from polynomial import polynomial
from approximation import calc_approx
from print_polynomial import print_polynomial
def get_exp_data():
with open("example_data_files\\housing.data", "r") as file:
lines = file.readlines()
for i in range(len(lines)):
if lines[i][0] == " ":
lines[i] = lines[i][1:]
lines[i] = re.sub("\n", "", lines[i])
lines[i] = re.sub(" +", " ", lines[i])
lines[i] = re.split(" ", lines[i])
exp_data = []
for line in lines:
curr_set = []
for curr_var in line:
curr_set.append(float(curr_var))
exp_data.append(curr_set)
return exp_data
def root_mean_square_error_calc(exp_data, polynomial_extent, c):
error = 0.0
for curr_data in exp_data:
error += (polynomial(polynomial_extent, curr_data[:-1], c) - curr_data[-1]) ** 2
error = np.sqrt(error / len(exp_data))
return error
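# The error computed above is the usual root-mean-square error over the
# held-out rows:
#     RMSE = sqrt( (1/N) * sum_i (p(x_i) - y_i)^2 )
# where p is the fitted polynomial and the last entry of each data row holds
# the target (median home value).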
def is_float(str_read):
try:
float(str_read)
return True
except ValueError:
return False
def set_vars(l_lim, h_lim, str_read):
is_ok = True
value = None
is_right = is_float(str_read)
if is_right is True:
val = float(str_read)
if (val >= l_lim) and (val <= h_lim):
value = val
ret_str = "ok"
else:
is_ok = False
ret_str = "Error: value is not in valid range"
else:
is_ok = False
ret_str = "Error: Invalid input format "
return ret_str, is_ok, value
def calc_mode(exp_data, polynomial_extent, c):
print("Test mode. To exit enter - stop.")
print("The input format is dotted. Example: 0.456")
print("Enter characteristics:")
characteristic_list = ["crime: crime rate per person by city. ",
"zn: proportion of residential land zoned for lots over 25,000 sq.ft.. ",
"indus: proportion of non-retail business acres per town. ",
"chas: Charles River dummy variable (= 1 if tract bounds river; 0 otherwise). ",
"nox: nitrogen oxides concentration (parts per 10 million). ",
"rm: average number of rooms per dwelling. ",
"age: proportion of owner-occupied units built prior to 1940. ",
"dis: weighted mean of distances to five Boston employment centres. ",
"rad: index of accessibility to radial highways. ",
"tax: full-value property-tax rate per $10,000. ",
"pt_ratio: pupil-teacher ratio by town. ",
"black: 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town. ",
"l_stat: lower status of the population (percent). "]
lim_list = []
for i in range(len(exp_data[0]) - 1):
lim_list.append([min([curr_data[i] for curr_data in exp_data]), max([curr_data[i] for curr_data in exp_data])])
while True:
test_list = []
for i in range(len(exp_data[0]) - 1):
while True:
enter_str = input(characteristic_list[i] + "limits(" + str(lim_list[i][0]) + "___"
+ str(lim_list[i][1]) + ") = ")
if enter_str == "stop":
return None
ret_str, is_ok, value = set_vars(lim_list[i][0], lim_list[i][1], enter_str)
if is_ok:
test_list.append(value)
break
else:
print(ret_str)
print("RESULT:")
print("med_v: median value of owner-occupied homes in $1000s. = " +
str(polynomial(polynomial_extent, test_list, c)) + "\n")
def main():
exp_data = get_exp_data()
polynomial_extent = 1
num_variables = 13
use_test_percent = 90.0
c = calc_approx(exp_data[:int(len(exp_data) * use_test_percent / 100.0)], polynomial_extent, num_variables)
print_polynomial(c, polynomial_extent, num_variables)
error = root_mean_square_error_calc(exp_data[int(len(exp_data) * use_test_percent / 100.0):], polynomial_extent, c)
print("root_mean_square_error = ", error)
calc_mode(exp_data[:int(len(exp_data) * use_test_percent / 100.0)], polynomial_extent, c)
if __name__ == "__main__":
main()
| {"hexsha": "9e81d2e0ffd61eeb5b378b51dd6770434c600503", "size": 4547, "ext": "py", "lang": "Python", "max_stars_repo_path": "example_003.py", "max_stars_repo_name": "DmitriyKhudiakov/MSE_approximation", "max_stars_repo_head_hexsha": "6d25711c14a27301335fe211bff9305ea2ad88d8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "example_003.py", "max_issues_repo_name": "DmitriyKhudiakov/MSE_approximation", "max_issues_repo_head_hexsha": "6d25711c14a27301335fe211bff9305ea2ad88d8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example_003.py", "max_forks_repo_name": "DmitriyKhudiakov/MSE_approximation", "max_forks_repo_head_hexsha": "6d25711c14a27301335fe211bff9305ea2ad88d8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8859649123, "max_line_length": 120, "alphanum_fraction": 0.5597096987, "include": true, "reason": "import numpy", "num_tokens": 1048} |
abstract SVM{TSpec <: SVMSpec} <: RegressionModel
# ==========================================================================
svmModel(spec::SVMSpec, solution::PrimalSolution, predmodel::Predictor, X::AbstractMatrix, Y::AbstractArray) =
primalSVMModel(spec, solution, predmodel, X, Y)
svmModel(spec::SVMSpec, solution::DualSolution, predmodel::Predictor, X::AbstractMatrix, Y::AbstractArray) =
dualSVMModel(spec, solution, predmodel, X, Y)
# ==========================================================================
"""
`PrimalSVM <: SVM`
Description
============
The result to the primal problem of a support vector machine.
It is the return value of the `svm` function if the parameter `dual = false`
Fields
=======
- **`params(this)`** : The structure and parameters of the utilized support vector machine
- **`nsv(this)`** : The number of support vectors
- **`svindex(this)`** : The indicies (i) of the support vectors within the training set
- **`features(this)`** : The full training set observations
- **`targets(this)`** : The full training set targets
- **`predmodel(this)`** : The underlying prediction model. It defines if the intercept
is present, as well as the input and output dimensions (univariate vs multivariate prediction)
- **`iterations(this)`** : Number of iterations used to fit the model.
- **`isconverged(this)`** : true, if the algorithm converged during training.
- **`fval(this)`** : The final objective value achieved during training.
- **`coef(this)`** : The fitted coefficients w⃗.
- **`details(this)`** : The training information returned by the solver itself.
It stores the final objective value, the number of used iterations,
the fitted coefficients w⃗, and a boolean that states if the algorithm was able to converge.
Methods
========
- **`predict(this, X)`** : predicts the response for the given observations in `X`.
Note that `X` should be of the same type as the data used for training the SVM.
- **`classify(this, X)`** : predicts the response for the given observations in `X`
and performs the decision function on the result.
Note that `X` should be of the same type as the data used for training the SVM.
- **`accuracy(this, X, y)`** : computes the accuracy by calculating the predictions
of `X` and comparing the results to `y`.
Note that `X` and `y` should have the same number of observations.
See also
=========
`svm`, `DualSVM`, `CSVM`
"""
abstract PrimalSVM{TSpec<:SVMSpec} <: SVM{TSpec}
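# Rough usage sketch for the accessor API documented above. The `svm` training
# function referenced in the docstring is defined elsewhere in the package and
# its exact positional arguments may differ from this sketch; the data is made up:
#
#     fit = svm(X, y, dual = false)   # X: features × observations, y ∈ {-1, +1}
#     coef(fit), intercept(fit)       # fitted w⃗ and whether a bias term is present
#     nsv(fit), svindex(fit)          # number and indices of the support vectors
#     accuracy(fit, X, y)             # fraction of correctly classified observations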
"""
`DensePrimalSVM <: PrimalSVM`
See `PrimalSVM`
"""
type DensePrimalSVM{TSpec<:SVMSpec, TDetails<:PrimalSolution, TPred<:Predictor, XT<:DenseMatrix, YT<:AbstractVector} <: PrimalSVM{TSpec}
params::TSpec
details::TDetails
predmodel::TPred
nsv::Int
svindex::Vector{Int}
Xtrain::XT
Ytrain::YT
end
"""
`SparsePrimalSVM <: PrimalSVM`
See `PrimalSVM`
"""
type SparsePrimalSVM{TSpec<:SVMSpec, TDetails<:PrimalSolution, TPred<:Predictor, XT<:AbstractSparseMatrix, YT<:AbstractVector} <: PrimalSVM{TSpec}
params::TSpec
details::TDetails
predmodel::TPred
nsv::Int
svindex::Vector{Int}
Xtrain::XT
Ytrain::YT
end
# ==========================================================================
"""
`DualSVM <: SVM`
Description
============
The result to the dual problem of a support vector machine.
It is the return value of the `svm` function if the parameter `dual = true`
Fields
=======
- **`params(this)`** : The structure and parameters of the utilized support vector machine
- **`nsv(this)`** : The number of support vectors
- **`svindex(this)`** : The indicies (i) of the support vectors within the training set
- **`features(this)`** : The full training set observations to which the model was fit
- **`targets(this)`** : The full training set targets to which the model was fit
- **`predmodel(this)`** : The underlying prediction model. It defines if the intercept
is present, as well as the input and output dimensions (univariate vs multivariate prediction)
- **`iterations(this)`** : Number of iterations used to fit the model.
- **`isconverged(this)`** : true, if the algorithm converged during training.
- **`fval(this)`** : The final objective value achieved during training.
- **`coef(this)`** : The coefficients (α) of the support vectors
- **`xsv(this)`** : The support vectors (if dense), or the training set observations (if sparse)
- **`ysv(this)`** : The targets of the support vectors
- **`details(this)`** : The training information returned by the solver itself.
It includes the final objective value, the number of used iterations,
the full coefficient vector α, the bias if present, and a boolean that
states if the algorithm was able to converge.
Methods
========
- **`predict(this, X)`** : predicts the response for the given observations in `X`.
Note that `X` should be of the same form as the data used for training the SVM.
- **`classify(this, X)`** : predicts the response for the given observations in `X`
and performs the decision function on the result.
Note that `X` should be of the same type as the data used for training the SVM.
- **`accuracy(this, X, y)`** : computes the accuracy by calculating the predictions
of `X` and comparing the results to `y`.
Note that `X` and `y` should have the same number of observations.
See also
=========
`svm`, `PrimalSVM`, `CSVM`
"""
abstract DualSVM{TSpec<:SVMSpec} <: SVM{TSpec}
"""
`DenseDualSVM <: DualSVM`
See `DualSVM`
"""
type DenseDualSVM{TSpec<:SVMSpec, TPred<:Predictor, XT<:DenseMatrix, YT<:AbstractVector} <: DualSVM{TSpec}
params::TSpec
details::DualSolution
predmodel::TPred
nsv::Int
alpha::Vector{Float64}
svindex::Vector{Int}
Xsv::Vector{ContiguousView{Float64,1,Array{Float64,2}}}
Ysv::Vector{Float64}
Xtrain::XT
Ytrain::YT
end
"""
`SparseDualSVM <: DualSVM`
See `DualSVM`
"""
type SparseDualSVM{TSpec<:SVMSpec, TPred<:Predictor, XT<:AbstractSparseMatrix, YT<:AbstractVector} <: DualSVM{TSpec}
params::TSpec
details::DualSolution
predmodel::TPred
nsv::Int
alpha::Vector{Float64}
svindex::Vector{Int}
Xsv::XT
Ysv::Vector{Float64}
Xtrain::XT
Ytrain::YT
end
# ==========================================================================
function primalSVMModel{TSpec<:SVMSpec, TPred<:Predictor, TReal<:Real}(
params::TSpec,
s::PrimalSolution,
predmodel::TPred,
Xtrain::StridedMatrix,
Ytrain::AbstractVector{TReal})
n = size(Xtrain, 2)
p = value(predmodel, Xtrain, minimizer(s))
l = zeros(n)
@inbounds for i = 1:n
lc = value(params.loss, Float64(Ytrain[i]), p[i] - sign(p[i]) * 0.0005)
lr = value(params.loss, Float64(Ytrain[i]), p[i] + sign(p[i]) * 0.0005)
l[i] = lc != 0 || lr != 0
end
svindex = find(l)
nsv = length(svindex)
DensePrimalSVM{TSpec, typeof(s), TPred, typeof(Xtrain), typeof(Ytrain)}(params, s, predmodel, nsv, svindex, Xtrain, Ytrain)
end
function primalSVMModel{TSpec<:SVMSpec, TPred<:Predictor, TReal<:Real}(
params::TSpec,
s::PrimalSolution,
predmodel::TPred,
Xtrain::AbstractSparseMatrix,
Ytrain::AbstractVector{TReal})
n = size(Xtrain, 2)
w = minimizer(s)
p = if typeof(predmodel) <: LinearPredictor{true}
fill(w[end] * predmodel.bias, size(Ytrain))
else
zeros(size(Ytrain))
end
@inbounds for i = 1:n
tstart = Xtrain.colptr[i]
tstop = Xtrain.colptr[i+1] - 1
for j = tstart:tstop
k = Xtrain.rowval[j]
p[i] += Xtrain.nzval[j] * w[k]
end
end
l = zeros(n)
@inbounds for i = 1:n
        lc = value(params.loss, Float64(Ytrain[i]), p[i] - sign(p[i]) * 0.0005)
lr = value(params.loss, Float64(Ytrain[i]), p[i] + sign(p[i]) * 0.0005)
l[i] = lc != 0 || lr != 0
end
svindex = find(l)
nsv = length(svindex)
SparsePrimalSVM{TSpec, typeof(s), TPred, typeof(Xtrain), typeof(Ytrain)}(params, s, predmodel, nsv, svindex, Xtrain, Ytrain)
end
function dualSVMModel{TSpec<:SVMSpec, TPred<:Predictor, TReal<:Real}(
params::TSpec,
s::DualSolution,
p::TPred,
Xtrain::StridedMatrix,
Ytrain::AbstractVector{TReal})
svindex = find(minimizer(s))
alpha = minimizer(s)[svindex]
nsv = length(alpha)
Xsv = Array(ContiguousView{Float64,1,Array{Float64,2}}, nsv)
@inbounds for i in 1:nsv
Xsv[i] = view(Xtrain, :, svindex[i])
end
Ysv = Ytrain[svindex]
DenseDualSVM{TSpec, TPred, typeof(Xtrain), typeof(Ytrain)}(params, s, p, nsv, alpha, svindex, Xsv, Ysv, Xtrain, Ytrain)
end
function dualSVMModel{TSpec<:SVMSpec, TPred<:Predictor, TReal<:Real}(
params::TSpec,
s::DualSolution,
p::TPred,
Xtrain::AbstractSparseMatrix,
Ytrain::AbstractVector{TReal})
svindex = find(minimizer(s))
alpha = minimizer(s)[svindex]
nsv = length(alpha)
Xsv = Xtrain
Ysv = Ytrain[svindex]
SparseDualSVM{TSpec, TPred, typeof(Xtrain), typeof(Ytrain)}(params, s, p, nsv, alpha, svindex, Xsv, Ysv, Xtrain, Ytrain)
end
# ==========================================================================
@inline labels{TSpec<:SVCSpec}(svm::SVM{TSpec}) = [-1., 1]
@inline nobs(fit::SVM) = length(fit.Ytrain)
@inline features(fit::SVM) = fit.Xtrain
@inline targets(fit::SVM) = fit.Ytrain
@inline model_response(fit::SVM) = fit.Ytrain
@inline details(fit::SVM) = fit.details
@inline isconverged(fit::SVM) = isconverged(details(fit))
@inline iterations(fit::SVM) = iterations(details(fit))
@inline params(fit::SVM) = fit.params
@inline minimum(fit::SVM) = minimum(details(fit))
@inline minimizer(fit::SVM) = minimizer(details(fit))
@inline nsv(fit::SVM) = fit.nsv
@inline svindex(fit::SVM) = fit.svindex
@inline intercept(fit::SVM) = typeof(predmodel(fit)) <: LinearPredictor{true}
@inline predmodel(fit::SVM) = fit.predmodel
@inline coef(fit::PrimalSVM) = coef(details(fit))
@inline coef(fit::DualSVM) = fit.alpha
@inline bias(fit::DualSVM) = bias(details(fit))
@inline xsv(fit::DualSVM) = fit.Xsv
@inline ysv(fit::DualSVM) = fit.Ysv
@inline predict(fit::SVM) = predict(fit, features(fit))
@inline classify{TSpec<:SVCSpec}(svm::SVM{TSpec}) = classify(svm, features(svm))
@inline accuracy{TSpec<:SVCSpec}(svm::SVM{TSpec}) = accuracy(svm, features(svm), targets(svm))
function classify{TSpec<:SVCSpec}(svm::SVM{TSpec}, X)
ŷ = predict(svm, X)
t = ndims(ŷ) == 1 ? sign(ŷ) : vec(mapslices(indmax, ŷ, 1))
t
end
function accuracy{TSpec<:SVCSpec}(svm::SVM{TSpec}, X, y)
n = size(X,2)
n == length(y) || throw(DimensionMismatch("X and y have to have the same number of observations"))
ȳ = classify(svm, X)
countnz(ȳ .== y) / n
end
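# classify/accuracy above implement the usual SVC decision rule: a univariate
# score is thresholded by its sign onto the {-1, +1} labels, while a
# multivariate (one-row-per-class) output picks the class with the largest
# score via indmax.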
function predict(fit::PrimalSVM, X::DenseMatrix)
p = value(predmodel(fit), X, minimizer(details(fit)))
size(p, 1) == 1 ? vec(p) : p
end
function predict(fit::PrimalSVM, X::AbstractSparseMatrix)
n = size(X,2)
w = minimizer(details(fit))
p = if typeof(predmodel(fit)) <: LinearPredictor{true}
fill(w[end] * predmodel(fit).bias, size(targets(fit)))
else
zeros(size(targets(fit)))
end
@inbounds for i = 1:n
tstart = X.colptr[i]
tstop = X.colptr[i+1] - 1
for j = tstart:tstop
k = X.rowval[j]
p[i] += X.nzval[j] * w[k]
end
end
p
end
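# The sparse path above walks the CSC storage of X directly: for observation i,
# colptr[i]:colptr[i+1]-1 indexes the stored entries of column i, rowval holds
# their feature indices and nzval their values, so w'x is accumulated without
# materializing dense columns.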
function predict(fit::DenseDualSVM, X::AbstractMatrix)
n = size(X,2)
result = zeros(n)
@inbounds for i in 1:n
for j in 1:nsv(fit)
result[i] += coef(fit)[j] * ysv(fit)[j] * dot(xsv(fit)[j], view(X,:,i))
end
result[i] += bias(fit)
end
result
end
function predict(fit::SparseDualSVM, X::AbstractMatrix)
n = size(X,2)
result = zeros(n)
tmp = 0.
@inbounds for i in 1:n
for j in 1:nsv(fit)
tstart = xsv(fit).colptr[svindex(fit)[j]]
tstop = xsv(fit).colptr[svindex(fit)[j]+1] - 1
tmp = 0.
for k = tstart:tstop
tmp += X[xsv(fit).rowval[k],i] * xsv(fit).nzval[k]
end
tmp *= ysv(fit)[j]
tmp *= coef(fit)[j]
result[i] += tmp
end
result[i] += bias(fit)
end
result
end
# ==========================================================================
# convert
function convert{TSpec <: SVMSpec{ScalarProductKernel{Float64}}}(
::Type{PrimalSVM},
dual::DualSVM{TSpec})
l = length(xsv(dual))
d = length(xsv(dual)[1])
# w = ∑ yᵢαᵢxᵢ
w = zeros(d+1)
for j = 1:d
for i = 1:l
@inbounds w[j] += ysv(dual)[i] * coef(dual)[i] * xsv(dual)[i][j]
end
end
sol = if typeof(predmodel(dual)) <: LinearPredictor{true}
w[end] = bias(dual)
PrimalSolution(w, minimum(dual), iterations(dual), isconverged(dual))
else
PrimalSolution(w[1:d], minimum(dual), iterations(dual), isconverged(dual))
end
svmModel(params(dual), sol, predmodel(dual), features(dual), targets(dual))
end
function convert{TSpec <: SVMSpec{ScalarProductKernel{Float64}}}(
::Type{DualSVM},
primal::PrimalSVM{TSpec})
k = size(features(primal), 1)
n = size(features(primal), 2)
    nsv_ = nsv(primal)          # local copies: writing `nsv = nsv(primal)` would shadow the accessor and throw UndefVarError
    svindex_ = svindex(primal)
    nsv_ <= k || throw(DimensionMismatch("Converting to dual solution is only possible if there are less (or equal) support vectors than there are features"))
    Q = zeros(k, nsv_)
    for i = 1:nsv_
        for j = 1:k
            @inbounds Q[j, i] = targets(primal)[svindex_[i]] * features(primal)[j, svindex_[i]]
        end
    end
    # NOTE: assumes an intercept is present; the bias-free case is not handled here
    w = minimizer(details(primal))[1:k]
    α = Q \ w
    alpha = zeros(n)
    alpha[svindex_] = α
sol = DualSolution(alpha, minimizer(details(primal))[end], minimum(primal), iterations(primal), isconverged(primal))
svmModel(params(primal), sol, predmodel(primal), features(primal), targets(primal))
end
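# Recovering α from w inverts the identity used in the dual-to-primal
# conversion above (w = ∑ yᵢαᵢxᵢ over the estimated support vectors): Q stacks
# the columns yᵢ·xᵢ, so w = Q·α and α is obtained as the least-squares solution
# Q \ w, then scattered back into a full-length coefficient vector.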
# ==========================================================================
# Plotting
function scatterplot{TSpec<:SVCSpec}(
fit::PrimalSVM{TSpec};
title::AbstractString = "Primal SVM Classification Plot",
xlim = [0.,0.],
ylim = [0.,0.],
lbl = map(string, labels(fit)),
nargs...)
size(features(fit),1) == 2 || throw(DimensionMismatch("Can only plot the SVM classification for a two-dimensional featurespace (i.e. size(X,1) == 2)"))
intercept_fit = typeof(predmodel(fit)) <: LinearPredictor{true}
offset = intercept_fit ? -(minimizer(details(fit))[3] * predmodel(fit).bias) / minimizer(details(fit))[2] : 0.
slope = -minimizer(details(fit))[1] / minimizer(details(fit))[2]
x1 = vec(view(features(fit), 1, :))
x2 = vec(view(features(fit), 2, :))
x1sv = x1[svindex(fit)]
x2sv = x2[svindex(fit)]
xmin = minimum(x1); xmax = maximum(x1)
ymin = minimum(x2); ymax = maximum(x2)
xlim = xlim == [0.,0.] ? [xmin, xmax] : xlim
ylim = ylim == [0.,0.] ? [ymin, ymax] : ylim
notalphaindex = setdiff(1:size(features(fit),2), svindex(fit))
x1 = x1[notalphaindex]
x2 = x2[notalphaindex]
y = targets(fit)[notalphaindex]
fig = scatterplot(x1[y.<0], x2[y.<0]; title = title, xlim = xlim, ylim = ylim, name = lbl[1], nargs...)
scatterplot!(fig, x1[y.>0], x2[y.>0], name = lbl[2])
scatterplot!(fig, x1sv, x2sv, color = :yellow, name = "support vectors")
lineplot!(fig, offset, slope, color = :white)
xlabel!(fig, "X₁")
ylabel!(fig, "X₂")
fig
end
function scatterplot{TSpec<:SVCSpec}(
fit::DualSVM{TSpec};
title::AbstractString = "Dual SVM Classification Plot",
xlim = [0.,0.],
ylim = [0.,0.],
lbl = map(string, labels(fit)),
nargs...)
size(features(fit),1) == 2 || throw(DimensionMismatch("Can only plot the SVM classification for a two-dimensional featurespace (i.e. size(X,1) == 2)"))
x1 = vec(view(features(fit), 1, :))
x2 = vec(view(features(fit), 2, :))
x1sv = x1[svindex(fit)]
x2sv = x2[svindex(fit)]
xmin = minimum(x1); xmax = maximum(x1)
ymin = minimum(x2); ymax = maximum(x2)
xlim = xlim == [0.,0.] ? [xmin, xmax] : xlim
ylim = ylim == [0.,0.] ? [ymin, ymax] : ylim
notalphaindex = setdiff(1:size(features(fit),2), svindex(fit))
x1 = x1[notalphaindex]
x2 = x2[notalphaindex]
y = targets(fit)[notalphaindex]
fig = scatterplot(x1[y.<0], x2[y.<0]; title = title, xlim = xlim, ylim = ylim, name = lbl[1], nargs...)
scatterplot!(fig, x1[y.>0], x2[y.>0], name = lbl[2])
scatterplot!(fig, x1sv, x2sv, color = :yellow, name = "support vectors")
xlabel!(fig, "X₁")
ylabel!(fig, "X₂")
fig
end
# ==========================================================================
# Base.show
function _showprimal(io::IO, fit)
_printconverged(io, isconverged(fit), iterations(fit))
_printvariable(io, 19, "details()", typeof(details(fit)))
_printvariable(io, 19, "isconverged()", isconverged(fit))
_printvariable(io, 19, "iterations()", iterations(fit))
println(io, "\n ◦ support vector machine:")
_printvariable(io, 14, "params()", params(fit))
println(io, "\n ◦ objective value (f):")
_printvariable(io, 17, "minimum()", minimum(fit))
println(io, "\n ◦ fitted coefficients (w⃗):")
_printvariable(io, 17, "coef()", coef(fit))
_printvariable(io, 17, "intercept()", intercept(fit))
_printvariable(io, 17, "predmodel()", predmodel(fit))
println(io, "\n ◦ support vectors (estimated):")
_printvariable(io, 17, "nsv()", nsv(fit))
_printvariable(io, 17, "svindex()", svindex(fit))
if size(features(fit),1) == 2 && size(features(fit),2) < 500
println(io, "\n ◦ classification plot (UnicodePlots.scatterplot(..)):")
fig = scatterplot(fit, margin = 5, width = 30, height = 10, title = "")
print(io, fig)
end
end
function _showdual(io::IO, fit)
_printconverged(io, isconverged(fit), iterations(fit))
_printvariable(io, 19, "details()", typeof(details(fit)))
_printvariable(io, 19, "isconverged()", isconverged(fit))
_printvariable(io, 19, "iterations()", iterations(fit))
println(io, "\n ◦ support vector machine:")
_printvariable(io, 14, "params()", params(fit))
println(io, "\n ◦ objective value (f):")
_printvariable(io, 17, "minimum()", minimum(fit))
println(io, "\n ◦ fitted coefficients (α):")
_printvariable(io, 17, "coef()", coef(fit))
intercept(fit) && _printvariable(io, 17, "bias()", bias(fit))
_printvariable(io, 17, "intercept()", intercept(fit))
_printvariable(io, 17, "predmodel()", predmodel(fit))
println(io, "\n ◦ support vectors:")
_printvariable(io, 17, "nsv()", nsv(fit))
_printvariable(io, 17, "svindex()", svindex(fit))
_printvariable(io, 17, "xsv()", typeof(xsv(fit)))
_printvariable(io, 17, "ysv()", ysv(fit))
if size(features(fit),1) == 2 && size(features(fit),2) < 500
println(io, "\n ◦ classification plot (UnicodePlots.scatterplot(..)):")
fig = scatterplot(fit, margin = 5, width = 30, height = 10, title = "")
print(io, fig)
end
end
function show(io::IO, fit::PrimalSVM)
println(io, typeof(fit), "\n")
_showprimal(io, fit)
end
function show(io::IO, fit::DualSVM)
println(io, typeof(fit), "\n")
_showdual(io, fit)
end
| {"hexsha": "9ad680ff5cb6f44aa2b360a37996ede54ccc827f", "size": 18586, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/svm_model.jl", "max_stars_repo_name": "Evizero/KSVM.jl", "max_stars_repo_head_hexsha": "1fec5d43f615fcf3964f96ca27dd7ad32325bd55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 35, "max_stars_repo_stars_event_min_datetime": "2015-09-24T23:58:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-24T23:29:15.000Z", "max_issues_repo_path": "src/svm_model.jl", "max_issues_repo_name": "Evizero/KSVM.jl", "max_issues_repo_head_hexsha": "1fec5d43f615fcf3964f96ca27dd7ad32325bd55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2015-09-11T09:05:57.000Z", "max_issues_repo_issues_event_max_datetime": "2019-09-30T18:24:53.000Z", "max_forks_repo_path": "src/svm_model.jl", "max_forks_repo_name": "Evizero/KSVM.jl", "max_forks_repo_head_hexsha": "1fec5d43f615fcf3964f96ca27dd7ad32325bd55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-03-21T12:46:48.000Z", "max_forks_repo_forks_event_max_datetime": "2019-09-18T15:11:38.000Z", "avg_line_length": 33.368043088, "max_line_length": 155, "alphanum_fraction": 0.6446249865, "num_tokens": 5875} |
# audio-offset-finder
#
# Copyright (c) 2014 British Broadcasting Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from subprocess import Popen, PIPE
from scipy.io import wavfile
from scikits.talkbox.features.mfcc import mfcc
import os, tempfile, warnings
import numpy as np
def find_offset(file1, file2, fs=8000, trim=60*15, correl_nframes=1000):
tmp1 = convert_and_trim(file1, fs, trim)
tmp2 = convert_and_trim(file2, fs, trim)
# Removing warnings because of 18 bits block size
# outputted by ffmpeg
# https://trac.ffmpeg.org/ticket/1843
warnings.simplefilter("ignore", wavfile.WavFileWarning)
a1 = wavfile.read(tmp1, mmap=True)[1] / (2.0 ** 15)
a2 = wavfile.read(tmp2, mmap=True)[1] / (2.0 ** 15)
    # We truncate zeroes off the beginning of each signal
# (only seems to happen in ffmpeg, not in sox)
a1 = ensure_non_zero(a1)
a2 = ensure_non_zero(a2)
mfcc1 = mfcc(a1, nwin=256, nfft=512, fs=fs, nceps=13)[0]
mfcc2 = mfcc(a2, nwin=256, nfft=512, fs=fs, nceps=13)[0]
mfcc1 = std_mfcc(mfcc1)
mfcc2 = std_mfcc(mfcc2)
c = cross_correlation(mfcc1, mfcc2, nframes=correl_nframes)
max_k_index = np.argmax(c)
# The MFCC window overlap is hardcoded in scikits.talkbox
offset = max_k_index * 160.0 / float(fs) # * over / sample rate
score = (c[max_k_index] - np.mean(c)) / np.std(c) # standard score of peak
os.remove(tmp1)
os.remove(tmp2)
return offset, score
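# Note: with the default fs=8000, one correlation lag corresponds to the
# 160-sample MFCC hop used above, i.e. 160 / 8000 = 0.02 s, so offsets are
# resolved in 20 ms steps.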
def ensure_non_zero(signal):
# We add a little bit of static to avoid
# 'divide by zero encountered in log'
# during MFCC computation
signal += np.random.random(len(signal)) * 10**-10
return signal
def cross_correlation(mfcc1, mfcc2, nframes):
n1, mdim1 = mfcc1.shape
n2, mdim2 = mfcc2.shape
n = n1 - nframes + 1
c = np.zeros(n)
for k in range(n):
cc = np.sum(np.multiply(mfcc1[k:k+nframes], mfcc2[:nframes]), axis=0)
c[k] = np.linalg.norm(cc)
return c
def std_mfcc(mfcc):
return (mfcc - np.mean(mfcc, axis=0)) / np.std(mfcc, axis=0)
def convert_and_trim(afile, fs, trim):
tmp = tempfile.NamedTemporaryFile(mode='r+b', prefix='offset_', suffix='.wav')
tmp_name = tmp.name
tmp.close()
psox = Popen([
'ffmpeg', '-loglevel', 'panic', '-i', afile,
'-ac', '1', '-ar', str(fs), '-ss', '0', '-t', str(trim),
'-acodec', 'pcm_s16le', tmp_name
], stderr=PIPE)
psox.communicate()
if not psox.returncode == 0:
raise Exception("FFMpeg failed")
return tmp_name
class OffsetFinder:
def __init__(self, source_filename, fs=8000, trim=60*15, correl_nframes=1000):
self.source_filename = source_filename
self.fs = fs
self.trim = trim
self.correl_nframes = correl_nframes
self.large_audio_path = ""
def __del__(self):
if self.large_audio_path != "":
os.remove(self.large_audio_path)
def init(self):
tmp, mfcc_out = self.get_mfcc(self.source_filename, fs=self.fs, trim=self.trim,
correl_nframes=self.correl_nframes)
self.large_audio_path = tmp
self.large_mfcc = mfcc_out
@staticmethod
def get_mfcc(filename, fs=8000, trim=60*15, correl_nframes=1000):
tmp = convert_and_trim(filename, fs, trim)
# Removing warnings because of 18 bits block size
# outputted by ffmpeg
# https://trac.ffmpeg.org/ticket/1843
warnings.simplefilter("ignore", wavfile.WavFileWarning)
a = wavfile.read(tmp, mmap=True)[1] / (2.0 ** 15)
        # We truncate zeroes off the beginning of each signal
# (only seems to happen in ffmpeg, not in sox)
a = ensure_non_zero(a)
mfcc_out = mfcc(a, nwin=256, nfft=512, fs=fs, nceps=13)[0]
mfcc_out = std_mfcc(mfcc_out)
return tmp, mfcc_out
def find_offset(self, filename, fs=8000, trim=60*15, correl_nframes=1000):
tmp, mfcc_out = self.get_mfcc(filename)
c = cross_correlation(self.large_mfcc, mfcc_out, nframes=correl_nframes)
max_k_index = np.argmax(c)
# The MFCC window overlap is hardcoded in scikits.talkbox
offset = max_k_index * 160.0 / float(fs) # * over / sample rate
score = (c[max_k_index] - np.mean(c)) / np.std(c) # standard score of peak
# remove the temp converted audio file
os.remove(tmp)
return offset, score
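# Illustrative usage of OffsetFinder (file names are hypothetical):
#   finder = OffsetFinder("full_programme.wav")
#   finder.init()
#   offset, score = finder.find_offset("clip.wav")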
| {"hexsha": "33b7af1ac4bdace212f4045ed6740fbee57bb1fe", "size": 4935, "ext": "py", "lang": "Python", "max_stars_repo_path": "audio_offset_finder/audio_offset_finder.py", "max_stars_repo_name": "CalderWhite/audio-offset-finder", "max_stars_repo_head_hexsha": "83708a6f40a0a3cde347b87ecc25ff06fa0743db", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "audio_offset_finder/audio_offset_finder.py", "max_issues_repo_name": "CalderWhite/audio-offset-finder", "max_issues_repo_head_hexsha": "83708a6f40a0a3cde347b87ecc25ff06fa0743db", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "audio_offset_finder/audio_offset_finder.py", "max_forks_repo_name": "CalderWhite/audio-offset-finder", "max_forks_repo_head_hexsha": "83708a6f40a0a3cde347b87ecc25ff06fa0743db", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.5104895105, "max_line_length": 87, "alphanum_fraction": 0.6559270517, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1422} |
import numpy as np
import unittest | {"hexsha": "7f6498b686ca2f35694ea194e5b4ccbeaf232ac6", "size": 34, "ext": "py", "lang": "Python", "max_stars_repo_path": "MCMC/test/test_00.py", "max_stars_repo_name": "Evanzai/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_stars_repo_head_hexsha": "051477a7f36090ec5c9cb9941afa1a81c761dd27", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "MCMC/test/test_00.py", "max_issues_repo_name": "Evanzai/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_issues_repo_head_hexsha": "051477a7f36090ec5c9cb9941afa1a81c761dd27", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "MCMC/test/test_00.py", "max_forks_repo_name": "Evanzai/Probabilistic-Programming-and-Bayesian-Methods-for-Hackers", "max_forks_repo_head_hexsha": "051477a7f36090ec5c9cb9941afa1a81c761dd27", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.0, "max_line_length": 18, "alphanum_fraction": 0.8529411765, "include": true, "reason": "import numpy", "num_tokens": 7} |
@symbol_func function P_E(cur_reactor::AbstractReactor)
cur_P = P_T(cur_reactor)
cur_P *= cur_reactor.eta_T
cur_P
end
| {"hexsha": "81aa1eb7cec36f526e9a0f279768d6cafcc8a263", "size": 126, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/methods/reactors/powers/p_e.jl", "max_stars_repo_name": "djsegal/Fusion.jl", "max_stars_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2017-12-31T10:16:41.000Z", "max_stars_repo_stars_event_max_datetime": "2018-03-13T22:41:17.000Z", "max_issues_repo_path": "src/methods/reactors/powers/p_e.jl", "max_issues_repo_name": "djsegal/Fusion.jl", "max_issues_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 21, "max_issues_repo_issues_event_min_datetime": "2017-04-11T05:06:15.000Z", "max_issues_repo_issues_event_max_datetime": "2017-11-23T05:06:33.000Z", "max_forks_repo_path": "src/methods/reactors/powers/p_e.jl", "max_forks_repo_name": "djsegal/Fussy.jl", "max_forks_repo_head_hexsha": "a0540fbf3345a778965fa092e9e56907a44c6521", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2017-04-26T16:58:21.000Z", "max_forks_repo_forks_event_max_datetime": "2017-04-27T15:25:51.000Z", "avg_line_length": 15.75, "max_line_length": 55, "alphanum_fraction": 0.7619047619, "num_tokens": 41} |
abstract type ShepardType <: InterpolationMethod end
export Shepard
"""
Shepard(P = 2)
Standard Shepard interpolation with power parameter `P`.
"""
struct Shepard{T <: Real} <: ShepardType
P::T
end
Shepard() = Shepard(2)
struct ShepardInterpolant{T1 <: AbstractArray, T2 <: AbstractMatrix{<:Real}, F, M} <: ScatteredInterpolant
data::T1
points::T2
idw::F
metric::M
end
# No need to compute anything here, everything is done in the evaluation step.
function interpolate(idw::ShepardType,
points::AbstractArray{<:Real,2},
samples::AbstractArray{<:Number,N};
metric = Euclidean()) where {N}
return ShepardInterpolant(samples, points, idw, metric)
end
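# Illustrative usage (shapes follow the API above; the data are hypothetical):
#   itp = interpolate(Shepard(2), points, samples)   # points: d×n matrix, samples: n×m values
#   vals = evaluate(itp, querypoints)                # querypoints: d×k matrix of query points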
function evaluate(itp::ShepardInterpolant, points::AbstractArray{<:Real,2})
# Compute distances between sample points and interpolation points
d = pairwise(itp.metric, itp.points, points;dims=2)
# Evaluate point by point
m = size(points, 2)
n = size(itp.data, 2)
values = zeros(eltype(itp.data), m, n)
for i = 1:m
d_col = d[:,i]
# If an interpolation point coincide with a sampling point, just return the
# original data. Otherwise, compute distance-weighted sum
if !all(r > 0 for r in d_col)
ind = findfirst(x -> x ≈ 0.0, d_col)
values[i,:] = itp.data[ind, :]
else
values[i,:] = evaluatePoint(itp.idw, itp.points, itp.data, d_col)
end
end
return values
end
# Original Shepard
function evaluatePoint(idw::Shepard,
dataPoints::AbstractArray{<:Real,2},
data::AbstractArray{<:Number,N},
d::AbstractVector) where {N}
# Compute weigths and return the weighted sum
w = 1.0./(d.^idw.P)
value = sum(w.*data, dims = 1)./sum(w)
end | {"hexsha": "2d959caa8476eb7df4d496555c48bd86658abd70", "size": 1900, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/idw.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/ScatteredInterpolation.jl-3f865c0f-6dca-5f4d-999b-29fe1e7e3c92", "max_stars_repo_head_hexsha": "c3c107fe7255c8967f6a1111b48f8a8a921a0dcf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-03-19T09:22:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-06T12:20:02.000Z", "max_issues_repo_path": "src/idw.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/ScatteredInterpolation.jl-3f865c0f-6dca-5f4d-999b-29fe1e7e3c92", "max_issues_repo_head_hexsha": "c3c107fe7255c8967f6a1111b48f8a8a921a0dcf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2018-04-13T10:43:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-08T13:11:57.000Z", "max_forks_repo_path": "src/idw.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/ScatteredInterpolation.jl-3f865c0f-6dca-5f4d-999b-29fe1e7e3c92", "max_forks_repo_head_hexsha": "c3c107fe7255c8967f6a1111b48f8a8a921a0dcf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2018-05-07T16:02:04.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-23T17:16:28.000Z", "avg_line_length": 28.3582089552, "max_line_length": 121, "alphanum_fraction": 0.6147368421, "num_tokens": 505} |
\subsection{SSNIP test}
A SSNIP is a small but significant and non-transitory increase in price (typically 5--10\%). The test asks whether such an increase would cause enough customers to move elsewhere to make it unprofitable. If we observe demand at different prices, we can estimate whether a hypothetical monopolist would find it worthwhile to monopolise the candidate market. Because an unprofitable price rise may simply reflect the presence of competing goods, we can widen the candidate market to include those goods and ask whether the wider market is worth monopolising.
For example, raising the price of one good alone may not be profitable, in which case that good by itself is not a relevant market (though it could still turn out to be one). We then ask whether raising the price of that good together with its close substitutes is profitable; if so, the wider set of goods constitutes a relevant market.
Notes:
\begin{itemize}
  \item The aim is to identify the smallest market in which a monopolist could profitably increase price.
  \item High measured elasticity may reflect monopoly power that is already being exerted (the cellophane fallacy), so the observed price need not be the competitive benchmark.
  \item In practice the test is often run by interviewing customers about whether a price increase would harm them and, in particular, whether they could switch supplier.
  \item If customers could switch away at a price rise of, say, 5\%, the candidate market is too small to be worth monopolising, and a larger one is chosen.
  \item The exercise can also be used to estimate the elasticity of demand.
  \item The cost side matters as well: if variable costs are high, the savings from cutting output make a price rise more appealing.
  \item Expanding the candidate market to include substitutes shows whether the wider market is worth monopolising; if it is, a merger within that market can raise competition concerns.
\end{itemize}
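As a stylised illustration (the numbers are hypothetical), suppose a product sells at price $p=100$ with marginal cost $c=70$, so the contribution margin is $m=(p-c)/p=30\%$, and the hypothetical monopolist considers a price rise of $x=5\%$. The rise breaks even when the fraction of sales lost equals the critical loss
\[
\frac{x}{x+m}=\frac{0.05}{0.05+0.30}\approx 14\%.
\]
If interviews or demand estimates suggest that more than roughly $14\%$ of sales would switch away, the 5\% increase is unprofitable, the candidate market is too narrow, and it should be widened to include the closest substitutes before repeating the test.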
| {"hexsha": "e2bf06e08d06ef574bc0b79c63d7da4d8f6e2726", "size": 1266, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "src/pug/theory/economics/competitionPolicy/04-01-SSNIP.tex", "max_stars_repo_name": "adamdboult/nodeHomePage", "max_stars_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/pug/theory/economics/competitionPolicy/04-01-SSNIP.tex", "max_issues_repo_name": "adamdboult/nodeHomePage", "max_issues_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2021-03-03T12:36:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-01T22:16:09.000Z", "max_forks_repo_path": "src/pug/theory/economics/competitionPolicy/04-01-SSNIP.tex", "max_forks_repo_name": "adamdboult/nodeHomePage", "max_forks_repo_head_hexsha": "266bfc6865bb8f6b1530499dde3aa6206bb09b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.2, "max_line_length": 247, "alphanum_fraction": 0.7962085308, "num_tokens": 261} |
module PrintFileTree
export printfiletree
"""
printfiletree()
printfiletree(root)
Like the unix utility `tree` (https://linux.die.net/man/1/tree).
Prints complete recursive directory structure of root and all its contents.
"""
function printfiletree(root=".")
println(root);
d,f = printfiletree_helper(root)
println("\n$d directories, $f files")
end
function printfiletree_helper(root, depth=0, opendirs=[true], dirscount=fill(0), filescount=fill(0))
files = readdir(root)
for (i,f) in enumerate(files)
startswith(f, ".") && continue
lastitem = (i == length(files))
lastitem && (opendirs[end] = false)
for p in opendirs[1:end-1]
print(p ? "│ " : " ")
end
println("$(lastitem ? "└" : "├")── " * f) # path
path = joinpath(root, f)
if isdir(path)
dirscount[] += 1
push!(opendirs, true)
printfiletree_helper(path, depth+1, opendirs, dirscount, filescount)
pop!(opendirs)
else
filescount[] += 1
end
end
dirscount[], filescount[]
end
end # module
| {"hexsha": "cf6b73d5bedb9468c6ce507683cb7f0d79f054d5", "size": 1140, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PrintFileTree.jl", "max_stars_repo_name": "UnofficialJuliaMirror/PrintFileTree.jl-083f6447-9a71-585a-bc2e-c0ad124fd6e4", "max_stars_repo_head_hexsha": "a8c54b150ed7aedcc2f0a0fdd4a38da9dc00eec5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-07-21T04:51:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-31T05:24:52.000Z", "max_issues_repo_path": "src/PrintFileTree.jl", "max_issues_repo_name": "UnofficialJuliaMirror/PrintFileTree.jl-083f6447-9a71-585a-bc2e-c0ad124fd6e4", "max_issues_repo_head_hexsha": "a8c54b150ed7aedcc2f0a0fdd4a38da9dc00eec5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-04-16T06:02:31.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-14T19:35:46.000Z", "max_forks_repo_path": "src/PrintFileTree.jl", "max_forks_repo_name": "UnofficialJuliaMirror/PrintFileTree.jl-083f6447-9a71-585a-bc2e-c0ad124fd6e4", "max_forks_repo_head_hexsha": "a8c54b150ed7aedcc2f0a0fdd4a38da9dc00eec5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-08T11:35:52.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-31T05:25:06.000Z", "avg_line_length": 26.511627907, "max_line_length": 100, "alphanum_fraction": 0.5877192982, "num_tokens": 311} |
/-
Copyright (c) 2021 Yakov Pechersky. All rights reserved.
Released under Apache 2.0 license as described in the file LICENSE.
Authors: Yakov Pechersky
-/
import data.equiv.basic
import data.set.finite
import group_theory.perm.sign
/-! # Equivalence between fintypes
This file contains some basic results on equivalences where one or both
sides of the equivalence are `fintype`s.
# Main definitions
- `function.embedding.to_equiv_range`: computably turn an embedding of a
fintype into an `equiv` of the domain to its range
- `equiv.perm.via_fintype_embedding : perm α → (α ↪ β) → perm β` extends the domain of
a permutation, fixing everything outside the range of the embedding
# Implementation details
- `function.embedding.to_equiv_range` uses a computable inverse, but one that has poor
computational performance, since it operates by exhaustive search over the input `fintype`s.
-/
variables {α β : Type*} [fintype α] [decidable_eq β] (e : equiv.perm α) (f : α ↪ β)
/--
Computably turn an embedding `f : α ↪ β` into an equiv `α ≃ set.range f`,
if `α` is a `fintype`. Has poor computational performance, due to exhaustive searching in
constructed inverse. When a better inverse is known, use `equiv.of_left_inverse'` or
`equiv.of_left_inverse` instead. This is the computable version of `equiv.of_injective`.
-/
def function.embedding.to_equiv_range : α ≃ set.range f :=
⟨λ a, ⟨f a, set.mem_range_self a⟩, f.inv_of_mem_range, λ _, by simp, λ _, by simp⟩
@[simp] lemma function.embedding.to_equiv_range_apply (a : α) :
f.to_equiv_range a = ⟨f a, set.mem_range_self a⟩ := rfl
@[simp] lemma function.embedding.to_equiv_range_symm_apply_self (a : α) :
f.to_equiv_range.symm ⟨f a, set.mem_range_self a⟩ = a :=
by simp [equiv.symm_apply_eq]
lemma function.embedding.to_equiv_range_eq_of_injective :
f.to_equiv_range = equiv.of_injective f f.injective :=
by { ext, simp }
/--
Extend the domain of `e : equiv.perm α`, mapping it through `f : α ↪ β`.
Everything outside of `set.range f` is kept fixed. Has poor computational performance,
due to exhaustive searching in constructed inverse due to using `function.embedding.to_equiv_range`.
When a better `α ≃ set.range f` is known, use `equiv.perm.via_set_range`.
When `[fintype α]` is not available, a noncomputable version is available as
`equiv.perm.via_embedding`.
-/
def equiv.perm.via_fintype_embedding : equiv.perm β :=
e.extend_domain f.to_equiv_range
@[simp] lemma equiv.perm.via_fintype_embedding_apply_image (a : α) :
e.via_fintype_embedding f (f a) = f (e a) :=
begin
rw equiv.perm.via_fintype_embedding,
convert equiv.perm.extend_domain_apply_image e _ _
end
lemma equiv.perm.via_fintype_embedding_apply_mem_range {b : β} (h : b ∈ set.range f) :
e.via_fintype_embedding f b = f (e (f.inv_of_mem_range ⟨b, h⟩)) :=
by simpa [equiv.perm.via_fintype_embedding, equiv.perm.extend_domain_apply_subtype, h]
lemma equiv.perm.via_fintype_embedding_apply_not_mem_range {b : β} (h : b ∉ set.range f) :
e.via_fintype_embedding f b = b :=
by rwa [equiv.perm.via_fintype_embedding, equiv.perm.extend_domain_apply_not_subtype]
@[simp] lemma equiv.perm.via_fintype_embedding_sign [decidable_eq α] [fintype β] :
equiv.perm.sign (e.via_fintype_embedding f) = equiv.perm.sign e :=
by simp [equiv.perm.via_fintype_embedding]
namespace equiv
variables {p q : α → Prop} [decidable_pred p] [decidable_pred q]
/-- If `e` is an equivalence between two subtypes of a fintype `α`, `e.to_compl`
is an equivalence between the complement of those subtypes.
See also `equiv.compl`, for a computable version when a term of type
`{e' : α ≃ α // ∀ x : {x // p x}, e' x = e x}` is known. -/
noncomputable def to_compl (e : {x // p x} ≃ {x // q x}) : {x // ¬ p x} ≃ {x // ¬ q x} :=
classical.choice (fintype.card_eq.mp (fintype.card_compl_eq_card_compl (fintype.card_congr e)))
/-- If `e` is an equivalence between two subtypes of a fintype `α`, `e.extend_subtype`
is a permutation of `α` acting like `e` on the subtypes and doing something arbitrary outside.
Note that when `p = q`, `equiv.perm.subtype_congr e (equiv.refl _)` can be used instead. -/
noncomputable abbreviation extend_subtype (e : {x // p x} ≃ {x // q x}) : perm α :=
subtype_congr e e.to_compl
lemma extend_subtype_apply_of_mem (e : {x // p x} ≃ {x // q x}) (x) (hx : p x) :
e.extend_subtype x = e ⟨x, hx⟩ :=
by { dunfold extend_subtype,
simp only [subtype_congr, equiv.trans_apply, equiv.sum_congr_apply],
rw [sum_compl_apply_symm_of_pos _ _ hx, sum.map_inl, sum_compl_apply_inl] }
lemma extend_subtype_mem (e : {x // p x} ≃ {x // q x}) (x) (hx : p x) :
q (e.extend_subtype x) :=
by { convert (e ⟨x, hx⟩).2,
rw [e.extend_subtype_apply_of_mem _ hx, subtype.val_eq_coe] }
lemma extend_subtype_apply_of_not_mem (e : {x // p x} ≃ {x // q x}) (x) (hx : ¬ p x) :
e.extend_subtype x = e.to_compl ⟨x, hx⟩ :=
by { dunfold extend_subtype,
simp only [subtype_congr, equiv.trans_apply, equiv.sum_congr_apply],
rw [sum_compl_apply_symm_of_neg _ _ hx, sum.map_inr, sum_compl_apply_inr] }
lemma extend_subtype_not_mem (e : {x // p x} ≃ {x // q x}) (x) (hx : ¬ p x) :
¬ q (e.extend_subtype x) :=
by { convert (e.to_compl ⟨x, hx⟩).2,
rw [e.extend_subtype_apply_of_not_mem _ hx, subtype.val_eq_coe] }
end equiv
| {"author": "jjaassoonn", "repo": "projective_space", "sha": "11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce", "save_path": "github-repos/lean/jjaassoonn-projective_space", "path": "github-repos/lean/jjaassoonn-projective_space/projective_space-11fe19fe9d7991a272e7a40be4b6ad9b0c10c7ce/src/data/equiv/fintype.lean"} |
# -*- coding: utf-8 -*-
"""
Created on Mon May 17 13:00:45 2021
@author: ben-o_000
"""
import numpy as np
import pysatellite.config as cfg
def AERtoECI(posAER, stepLength, stepNum, OriECEF, latOri, lonOri):
    """
    Function for converting Az/Elev/Range to ECI coordinates
    ~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
    posAER: A 1x3 or 3x1 vector containing the Azimuth, Elevation, and Range
    positions in radians and metres, respectively.
    stepLength: The length of each time step of the simulation.
    stepNum: The current step number of the simulation. This works with step
    length to convert increasing steps through the simulation.
    OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
    as xECEF, yECEF, and zECEF respectively.
    latOri: The latitude of the origin of the local NED frame in radians.
    lonOri: The longitude of the origin of the local NED frame in radians.
    ~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
    posECI: A 3x1 vector containing the x, y, and z ECI positions,
    respectively.
    """
omega = np.float64(7.2921158553e-5) # Earth rotation rate (radians/sec) ~SIDEREAL
sin = np.sin
cos = np.cos
az = posAER[0]
elev = posAER[1]
ran = posAER[2]
zUp = ran * sin(elev)
r = ran * cos(elev)
yEast = r * sin(az)
xNorth = r * cos(az)
posNED = np.array([[xNorth, yEast, -zUp]], dtype='float64').T
# rotMatrix = [[(-sin(latOri)*cos(lonOri)), -sin(lonOri), (-cos(latOri) * cos(lonOri))], [(-sin(latOri) * sin(lonOri)), cos(lonOri), (-cos(latOri) * sin(lonOri))], [cos(latOri), 0, (-sin(lonOri))]]
# rotMatrix = np.array(rotMatrix)
rotMatrix = np.array([[(-sin(latOri)*cos(lonOri)), -sin(lonOri), (-cos(latOri) * cos(lonOri))],
[(-sin(latOri) * sin(lonOri)), cos(lonOri), (-cos(latOri) * sin(lonOri))],
[cos(latOri), 0.0, (-sin(latOri))]], dtype='float64')
posECEFDelta = rotMatrix @ posNED
posECEF = posECEFDelta + OriECEF
# Generate matrices for multiplication
rotationMatrix = np.array([[cos(stepNum*stepLength*omega), -sin(stepNum*stepLength*omega), 0.0],
[sin(stepNum*stepLength*omega), cos(stepNum*stepLength*omega), 0.0],
[0.0, 0.0, 1.0]], dtype='float64')
posECI = rotationMatrix @ posECEF
return posECI
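# Note: omega = 7.2921158553e-5 rad/s is one revolution per sidereal day
# (2*pi / 86164.1 s), so the ECEF<->ECI rotations in this module use
# stepNum*stepLength*omega as the rotation angle about the z axis.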
def AERtoLLA(posAER, OriECEF, latOri, lonOri):
'''
Function for converting Az/Elev/Range to Latitude/Longitude/Altitude
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posAER: A 1x3 or 3x1 vector containing the Azimuth, Elevation, and Range
positions in radians and metres, respectively.
OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
as xECEF, yECEF, and zECEF respectively.
latOri: The latitude of the origin of the local NED frame in radians.
lonOri: The longitude of the origin of the local NED frame in radians.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posLLA: A 3x1 vector containing the latitude, longitude, and altitude
positions in radians and metres, respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
az = posAER[0]
elev = posAER[1]
ran = posAER[2]
zUp = ran * sin(elev)
r = ran * cos(elev)
yEast = r * sin(az)
xNorth = r * cos(az)
cosPhi = cos(latOri)
sinPhi = sin(latOri)
cosLambda = cos(lonOri)
sinLambda = sin(lonOri)
zDown = -zUp
t = cosPhi * -zDown - sinPhi * xNorth
dz = sinPhi * -zDown + cosPhi * xNorth
dx = cosLambda * t - sinLambda * yEast
dy = sinLambda * t + cosLambda * yEast
xECEF = OriECEF[0] + dx
yECEF = OriECEF[1] + dy
zECEF = OriECEF[2] + dz
# Ellipsoid properties
a = WGS["SemimajorAxis"] # Semimajor axis
b = WGS["SemiminorAxis"] # Semiminor axis
f = WGS["Flattening"] # Flattening
e2 = f * (2 - f) # Square of (first) eccentricity
ep2 = e2 / (1 - e2) # Square of second eccentricity
e = np.sqrt((a**2 - b**2) / a**2)
ePrime = np.sqrt((a**2 - b**2) / b**2)
#Closed formula set
p = np.sqrt(xECEF**2+yECEF**2)
theta = np.arctan2((zECEF * a), (p * b))
lon = np.arctan2(yECEF,xECEF)
#lon = mod(lon,2*pi)
lat = np.arctan2((zECEF + (ePrime**2 * b * (sin(theta))**3)), (p - (e**2 * a * (cos(theta))**3)))
N = a / (np.sqrt(1 - e**2 * (sin(lat))**2))
alt = (p / cos(lat)) - N
posLLA = [lat], [lon], [alt]
return posLLA
def AERtoNED(posAER):
'''
Function for converting Az/Elev/Range to local North/East/Down
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posAER: A 1x3 or 3x1 vector containing the Azimuth, Elevation, and Range
positions in radians and metres, respectively.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posNED: A 3x1 vector containing the north, east, and down positions,
respectively.
'''
sin = np.sin
cos = np.cos
az = posAER[0]
elev = posAER[1]
ran = posAER[2]
zUp = ran * sin(elev)
r = ran * cos(elev)
yEast = r * sin(az)
xNorth = r * cos(az)
posNED = [xNorth], [yEast], [-zUp]
return posNED
def ECEFtoECI(posECEF, stepLength, stepNum):
'''
Function for converting ECEF coordinates to ECI coordinates
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECEF: A 3x1 vector containing the x, y, and z ECEF positions,
respectively.
stepLength: The length of each time step of the simulation.
stepNum: The current step number of the simulation. This works with step
length to convert increasing steps through the simulation.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posECI: A 3x1 vector containing the x, y, and z ECI positions,
respectively.
'''
sin = np.sin
cos = np.cos
omega = np.float64(7.2921158553e-5) #Earth rotation rate (radians/sec) ~SIDEREAL
#omega = 2*pi / (24*60*60)
T = np.array(
[[cos(omega*stepLength*stepNum), -(sin(omega*stepLength*stepNum)), 0.0],
[sin(omega*stepLength*stepNum), cos(omega*stepLength*stepNum), 0.0],
[0.0, 0.0, 1.0]],
dtype='float64'
)
posECI = T @ posECEF
return posECI
def ECEFtoLLA(posECEF):
'''
Function for converting ECEF coordinates to latitude/longitude/altitude,
using a closed formula set.
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECEF: A 1x3 or 3x1 vector containing the x, y, and z ECEF positions,
respectively.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posLLA: A 3x1 vector containing the latitude, longitude, and altitude
positions in radians, respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
# Ellipsoid properties
a = WGS["SemimajorAxis"] # Semimajor axis
b = WGS["SemiminorAxis"] # Semiminor axis
e = np.sqrt((a**2 - b**2) / a**2) # Square of (first) eccentricity
ePrime = np.sqrt((a**2 - b**2) / b**2) # Square of second eccentricity
xECEF = posECEF[0]
yECEF = posECEF[1]
zECEF = posECEF[2]
#Closed formula set
p = np.sqrt(xECEF**2+yECEF**2)
theta = np.arctan2((zECEF * a), (p * b))
lon = np.arctan2(yECEF,xECEF)
#lon = mod(lon,2*pi)
lat = np.arctan2((zECEF + (ePrime**2 * b * (sin(theta))**3)), (p - (e**2 * a * (cos(theta))**3)))
N = a / (np.sqrt(1 - e**2 * (sin(lat))**2))
alt = (p / cos(lat)) - N
posLLA = [lat], [lon], [alt]
return posLLA
def ECEFtoNED(posECEF, OriECEF, latOri, lonOri):
'''
Function for converting ECEF coordinates to local NED coordinates
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECEF: A 1x3 or 3x1 vector containing the x, y, and z ECEF positions,
respectively.
OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
as xECEF, yECEF, and zECEF respectively.
latOri: The latitude of the origin of the local NED frame in radians.
lonOri: The longitude of the origin of the local NED frame in radians.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posNED: A 3x1 vector containing the north, east, and down positions,
respectively.
'''
sin = np.sin
cos = np.cos
xObj = posECEF[0]
yObj = posECEF[1]
zObj = posECEF[2]
#Generate matrices for multiplication
rotationMatrix = np.array(
[[-(sin(lonOri)), cos(lonOri), 0.0],
[(-(sin(latOri))*cos(lonOri)), (-(sin(latOri))*sin(lonOri)), cos(latOri)],
[(cos(latOri)*cos(lonOri)), (cos(latOri)*sin(lonOri)), sin(latOri)]],
dtype='float64'
)
coordMatrix = [xObj - OriECEF[0]], [yObj - OriECEF[1]], [zObj - OriECEF[2]]
#Find ENU vector
ENU = rotationMatrix @ coordMatrix
#Convert ENU vector to NED vector
xNorth = ENU[1]
yEast = ENU[0]
zDown = -ENU[2]
posNED = [xNorth], [yEast], [zDown]
return posNED
def ECItoAER(posECI, stepLength, stepNum, OriECEF, latOri, lonOri):
'''
Function for converting ECI position to Azimuth/Elevation/Range
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECI: A 3x1 vector containing the x, y, and z ECI positions,
respectively.
stepLength: The length of each time step of the simulation.
stepNum: The current step number of the simulation. This works with step
length to convert increasing steps through the simulation.
OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
as xECEF, yECEF, and zECEF respectively.
latOri: The latitude of the origin of the local NED frame in radians.
lonOri: The longitude of the origin of the local NED frame in radians.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posAER: A 3x1 vector containing the azimuth, elevation, and range
positions in radians, respectively.
'''
sin = np.sin
cos = np.cos
omega = np.float64(7.2921158553e-5) #Earth rotation rate (radians/sec) SIDEREAL
#omega = 2*pi / (24*60*60)
rotationMatrix = np.array(
[[cos(stepNum*stepLength*omega), sin(stepNum*stepLength*omega), 0.0],
[-(sin(stepNum*stepLength*omega)), cos(stepNum*stepLength*omega), 0.0],
[0.0, 0.0, 1.0]],
dtype='float64'
)
posECI = np.reshape(posECI, (3,1))
#posECEF = np.matmul(rotationMatrix, posECI)
posECEF = rotationMatrix @ posECI
transformMatrix = np.array(
[[(-sin(latOri)*cos(lonOri)), (-sin(latOri)*sin(lonOri)), cos(latOri)],
[(-sin(lonOri)), cos(lonOri), 0.0],
[(-cos(latOri)*cos(lonOri)), (-cos(latOri)*sin(lonOri)), (-sin(latOri))]],
dtype='float64'
)
# transformMatrix = [(-sin(latOri)*cos(lonOri)), (-sin(latOri)*sin(lonOri)), cos(latOri)], [(-sin(lonOri)), cos(lonOri), 0.0], [(-cos(latOri)*cos(lonOri)), (-cos(latOri)*sin(lonOri)), (-sin(latOri))]
posDelta = posECEF - OriECEF
# posNED = np.matmul(transformMatrix, posDelta)
posNED = transformMatrix @ posDelta
#Convert ENU vector to NED vector
xNorth = np.float64(posNED[0])
yEast = np.float64(posNED[1])
zDown = np.float64(posNED[2])
r1 = np.hypot(xNorth, yEast)
ran = np.hypot(r1,zDown)
elevation = np.arctan2(-zDown,r1)
azimuth = np.mod(np.arctan2(yEast, xNorth),2*np.float64(np.pi))
posAER = np.array([[azimuth], [elevation], [ran]])
return posAER
def ECItoECEF(posECI, stepLength, stepNum):
'''
Function for converting ECI coordinates to ECEF coordinates
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECI: A 3x1 vector containing the x, y, and z ECI positions,
respectively.
stepLength: The length of each time step of the simulation.
stepNum: The current step number of the simulation. This works with step
length to convert increasing steps through the simulation.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posECEF: A 3x1 vector containing the x, y, and z ECEF positions,
respectively.
'''
sin = np.sin
cos = np.cos
omega = np.float64(7.2921158553e-5) #Earth rotation rate (radians/sec) ~SIDEREAL
#omega = 2*pi / (24*60*60)
T = np.array(
[[cos(omega*stepLength*stepNum), sin(omega*stepLength*stepNum), 0.0],
[-(sin(omega*stepLength*stepNum)), cos(omega*stepLength*stepNum), 0.0],
[0.0, 0.0, 1.0]],
dtype='float64'
)
posECEF = np.matmul(T, posECI)
return posECEF
def ECItoLLA(posECI, stepLength, stepNum):
'''
Function for converting ECI coordinates to latitude/longitude/altitude,
using a closed formula set.
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posECI: A 1x3 or 3x1 vector containing the x, y, and z ECEF positions,
respectively.
stepLength: The length of each time step of the simulation.
stepNum: The current step number of the simulation. This works with step
length to convert increasing steps through the simulation.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posLLA: A 3x1 vector containing the latitude, longitude, and altitude
positions in radians, respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
omega = np.float64(7.2921158553e-5) #Earth rotation rate (radians/sec) ~SIDEREAL
#omega = 2*pi / (24*60*60)
# Ellipsoid properties
a = WGS["SemimajorAxis"] # Semimajor axis
b = WGS["SemiminorAxis"] # Semiminor axis
e = np.sqrt((a**2 - b**2) / a**2) # Square of (first) eccentricity
ePrime = np.sqrt((a**2 - b**2) / b**2) # Square of second eccentricity
rotationMatrix = np.array(
[[cos(stepNum*stepLength*omega), sin(stepNum*stepLength*omega), 0.0],
[-sin(stepNum*stepLength*omega), cos(stepNum*stepLength*omega), 0.0],
[0.0, 0.0, 1.0]],
dtype='float64'
)
posECEF = rotationMatrix @ posECI
    xECEF = posECEF[0]
    yECEF = posECEF[1]
    zECEF = posECEF[2]
#Closed formula set
p = np.sqrt(xECEF**2+yECEF**2)
theta = np.arctan2((zECEF * a), (p * b))
lon = np.arctan2(yECEF,xECEF)
#lon = mod(lon,2*pi)
lat = np.arctan2((zECEF + (ePrime**2 * b * (sin(theta))**3)), (p - (e**2 * a * (cos(theta))**3)))
N = a / (np.sqrt(1 - e**2 * (sin(lat))**2))
alt = (p / cos(lat)) - N
posLLA = [lat], [lon], [alt]
return posLLA
def LLAtoAER(posLLA, OriECEF, latOri, lonOri):
'''
Function for converting Az/Elev/Range to Latitude/Longitude/Altitude
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posLLA: A 3x1 vector containing the latitude, longitude, and altitude
positions in radians, respectively.
OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
as xECEF, yECEF, and zECEF respectively.
latOri: The latitude of the origin of the local NED frame in radians.
lonOri: The longitude of the origin of the local NED frame in radians.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posAER: A 1x3 or 3x1 vector containing the Azimuth, Elevation, and Range
positions in radians, respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
#Ellipsoid parameters
a = WGS["SemimajorAxis"]
b = WGS["SemiminorAxis"]
e = WGS["Eccentricity"]
lat = posLLA[0]
lon = posLLA[1]
alt = posLLA[2]
#Prime vertical radius of curvature N(phi)
    #Formula if eccentricity not defined: NPhi = a**2 / (sqrt((a**2*(cos(lat)**2))+(b**2*(sin(lat)**2))))
NPhi = a / (np.sqrt(1 - (e**2*(sin(lat))**2)))
xECEF = (NPhi + alt) * cos(lat) * cos(lon)
yECEF = (NPhi + alt) * cos(lat) * sin(lon)
zECEF = (((b**2/a**2)*NPhi) + alt)*sin(lat)
#Generate matrices for multiplication
rotationMatrix = np.array(
[[-sin(lonOri), cos(lonOri), 0],
[(-sin(latOri)*cos(lonOri)), (-sin(latOri)*sin(lonOri)), cos(latOri)],
[(cos(latOri)*cos(lonOri)), (cos(latOri)*sin(lonOri)), sin(latOri)]],
dtype='float64'
)
coordMatrix = np.array(
[[xECEF - OriECEF[0]],
[yECEF - OriECEF[1]],
[zECEF - OriECEF[2]]],
dtype='float64'
)
#Find ENU vector
ENU = rotationMatrix @ coordMatrix
#Convert ENU vector to NED vector
xNorth = ENU[1]
yEast = ENU[0]
zDown = -ENU[2]
r1 = np.hypot(xNorth, yEast)
ran = np.hypot(r1,zDown)
elevation = np.arctan2(-zDown,r1)
azimuth = np.mod(np.arctan2(yEast, xNorth),2*np.pi)
posAER = [azimuth], [elevation], [ran]
return posAER
def LLAtoECEF(posLLA):
'''
    Function for converting latitude/longitude/altitude to ECEF coordinates.
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posLLA: A 1x3 or 3x1 vector containing the latitude, longitude, and
altitude positions in radians, respectively.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posECEF: A 3x1 vector containing the x, y, and z ECEF positions,
respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
#Ellipsoid parameters
a = WGS["SemimajorAxis"]
b = WGS["SemiminorAxis"]
e = WGS["Eccentricity"]
lat = posLLA[0]
lon = posLLA[1]
alt = posLLA[2]
#Prime vertical radius of curvature N(phi)
    #Formula if eccentricity not defined: NPhi = a**2 / (sqrt((a**2*(cos(lat)**2))+(b**2*(sin(lat)**2))))
NPhi = a / (np.sqrt(1 - (e**2*(sin(lat))**2)))
xECEF = (NPhi + alt) * cos(lat) * cos(lon)
yECEF = (NPhi + alt) * cos(lat) * sin(lon)
zECEF = (((b**2/a**2)*NPhi) + alt)*sin(lat)
posECEF = np.array([[xECEF],
[yECEF],
[zECEF]])
return posECEF
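# Example (illustrative; relies on cfg.WGS providing "SemimajorAxis",
# "SemiminorAxis" and "Eccentricity" for WGS84):
#   posLLA = np.array([[0.0], [0.0], [0.0]])   # equator, prime meridian, sea level
#   LLAtoECEF(posLLA)                          # ~[[6378137.0], [0.0], [0.0]] with WGS84 values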
def LLAtoECI(posLLA, stepLength, stepNum):
'''
Function for converting latitude/longitude/altitude to ECI coordinates.
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posLLA: A 1x3 or 3x1 vector containing the latitude, longitude, and
altitude positions in radians, respectively.
stepLength: The length of each time step of the simulation.
stepNum: The current step number of the simulation. This works with step
length to convert increasing steps through the simulation.
WGS: The WGS84 reference ellipsoid
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posECI: A 3x1 vector containing the x, y, and z ECI positions,
respectively.
'''
sin = np.sin
cos = np.cos
WGS = cfg.WGS
omega = np.float64(7.2921158553e-5) #Earth rotation rate (radians/sec) ~SIDEREAL
#Ellipsoid parameters
a = WGS["SemimajorAxis"]
b = WGS["SemiminorAxis"]
e = WGS["Eccentricity"]
lat = posLLA[0]
lon = posLLA[1]
alt = posLLA[2]
#Prime vertical radius of curvature N(phi)
    #Formula if eccentricity not defined: NPhi = a**2 / (sqrt((a**2*(cos(lat)**2))+(b**2*(sin(lat)**2))))
NPhi = a / (np.sqrt(1 - (e**2*(sin(lat))**2)))
xECEF = (NPhi + alt) * cos(lat) * cos(lon)
yECEF = (NPhi + alt) * cos(lat) * sin(lon)
zECEF = (((b**2/a**2)*NPhi) + alt)*sin(lat)
posECEF = [xECEF], [yECEF], [zECEF]
#Generate matrices for multiplication
rotationMatrix = np.array(
[[cos(stepNum*stepLength*omega), -sin(stepNum*stepLength*omega), 0.0],
[sin(stepNum*stepLength*omega), cos(stepNum*stepLength*omega), 0.0],
[0.0, 0.0, 1.0]],
dtype='float64'
)
posECI = rotationMatrix @ posECEF
return posECI
def NEDtoAER(posNED):
'''
Function for converting local North/East/Down to Az/Elev/Range
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posNED: A 1x3 or 3x1 vector containing the north, east, and down positions,
respectively.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posAER: A 3x1 vector containing the Azimuth, Elevation, and Range
positions in radians, respectively.
'''
xNorth = posNED[0]
yEast = posNED[1]
zDown = posNED[2]
r1 = np.hypot(xNorth, yEast)
ran = np.hypot(r1,zDown)
elevation = np.arctan2(-zDown,r1)
azimuth = np.mod(np.arctan2(yEast, xNorth),2*np.pi)
posAER = [azimuth], [elevation], [ran]
return posAER
def NEDtoECEF(posNED, OriECEF, latOri, lonOri):
'''
    Function for converting local NED coordinates to ECEF coordinates
~~~~~~~~~~~~~~~~~INPUTS~~~~~~~~~~~~
posNED: A 1x3 or 3x1 vector containing the north, east, and down positions,
respectively.
OriECEF: A 1x3 or 3x1 vector containing the origin of the local NED frame
as xECEF, yECEF, and zECEF respectively.
latOri: The latitude of the origin of the local NED frame in radians.
lonOri: The longitude of the origin of the local NED frame in radians.
~~~~~~~~~~~~~~~OUTPUTS~~~~~~~~~~~~
posECEF: A 3x1 vector containing the x, y, and z ECEF positions,
respectively.
'''
sin = np.sin
cos = np.cos
cosPhi = cos(latOri)
sinPhi = sin(latOri)
cosLambda = cos(lonOri)
sinLambda = sin(lonOri)
xNorth = posNED[0]
yEast = posNED[1]
zDown = posNED[2]
t = cosPhi * -zDown - sinPhi * xNorth
dz = sinPhi * -zDown + cosPhi * xNorth
dx = cosLambda * t - sinLambda * yEast
dy = sinLambda * t + cosLambda * yEast
xECEF = OriECEF[0] + dx
yECEF = OriECEF[1] + dy
zECEF = OriECEF[2] + dz
posECEF = [xECEF], [yECEF], [zECEF]
return posECEF | {"hexsha": "3d55ef28468ec8e2d02c81b4cc22aa8dfbd108e4", "size": 22208, "ext": "py", "lang": "Python", "max_stars_repo_path": "pysatellite/Transformations.py", "max_stars_repo_name": "sgboakes/pysatellite", "max_stars_repo_head_hexsha": "743e6824bddf5d2cf760da699db27810d9e2f8f5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pysatellite/Transformations.py", "max_issues_repo_name": "sgboakes/pysatellite", "max_issues_repo_head_hexsha": "743e6824bddf5d2cf760da699db27810d9e2f8f5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pysatellite/Transformations.py", "max_forks_repo_name": "sgboakes/pysatellite", "max_forks_repo_head_hexsha": "743e6824bddf5d2cf760da699db27810d9e2f8f5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.4115983027, "max_line_length": 204, "alphanum_fraction": 0.5686239193, "include": true, "reason": "import numpy", "num_tokens": 6857} |
import numpy as np
from dgp_aepmcm.layers.output_layer_base import OutputLayerBase
from dgp_aepmcm.nodes.output_node_regression import OutputNodeRegression
class OutputLayerRegression(OutputLayerBase):
def __init__(
self, y_train_tf, y_test_tf, y_train_mean_tf, y_train_std_tf, n_samples, dtype=np.float32
):
OutputLayerBase.__init__(self, y_test_tf)
self.n_samples = n_samples
self.n_nodes = 1
output_node = OutputNodeRegression(
y_train_tf, y_test_tf, y_train_mean_tf, y_train_std_tf, n_samples, dtype
)
self.add_node(output_node)
def sample_from_predictive_distribution(self, samples_per_point=1):
return self.get_node_list()[0].sample_from_predictive_distribution(samples_per_point)
def get_predictive_distribution_fixed_x(self, y_values):
return self.get_node_list()[0].get_predictive_distribution_fixed_x(y_values)
def calculate_loglikehood_rmse(self):
""" Calculates LL and RMSE for a regression problem
As the dimension of the Y variables should be 1
this layer should only have 1 node
"""
return self.get_node_list()[0].calculate_loglikehood_rmse()
| {"hexsha": "ffcaf7d69210291c72d7db91c58ea27cc3ece0db", "size": 1209, "ext": "py", "lang": "Python", "max_stars_repo_path": "dgp_aepmcm/layers/output_layer_regression.py", "max_stars_repo_name": "Gonzalo933/dgp-aepmcm", "max_stars_repo_head_hexsha": "d4aca49a5072daee0a7b9a906827d7c7b3179ec0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "dgp_aepmcm/layers/output_layer_regression.py", "max_issues_repo_name": "Gonzalo933/dgp-aepmcm", "max_issues_repo_head_hexsha": "d4aca49a5072daee0a7b9a906827d7c7b3179ec0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dgp_aepmcm/layers/output_layer_regression.py", "max_forks_repo_name": "Gonzalo933/dgp-aepmcm", "max_forks_repo_head_hexsha": "d4aca49a5072daee0a7b9a906827d7c7b3179ec0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.78125, "max_line_length": 97, "alphanum_fraction": 0.7402812242, "include": true, "reason": "import numpy", "num_tokens": 278} |
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The TensorBoard Scalars plugin.
See `http_api.md` in this directory for specifications of the routes for
this plugin.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import six
from six import StringIO
from werkzeug import wrappers
import numpy as np
from tensorboard import errors
from tensorboard import plugin_util
from tensorboard.backend import http_util
from tensorboard.data import provider
from tensorboard.plugins import base_plugin
from tensorboard.plugins.scalar import metadata
from tensorboard.util import tensor_util
_DEFAULT_DOWNSAMPLING = 1000 # scalars per time series
class OutputFormat(object):
"""An enum used to list the valid output formats for API calls."""
JSON = "json"
CSV = "csv"
class ScalarsPlugin(base_plugin.TBPlugin):
"""Scalars Plugin for TensorBoard."""
plugin_name = metadata.PLUGIN_NAME
def __init__(self, context):
"""Instantiates ScalarsPlugin via TensorBoard core.
Args:
context: A base_plugin.TBContext instance.
"""
self._downsample_to = (context.sampling_hints or {}).get(
self.plugin_name, _DEFAULT_DOWNSAMPLING
)
self._data_provider = context.data_provider
def get_plugin_apps(self):
return {
"/scalars": self.scalars_route,
"/tags": self.tags_route,
}
def is_active(self):
return False # `list_plugins` as called by TB core suffices
def frontend_metadata(self):
return base_plugin.FrontendMetadata(element_name="tf-scalar-dashboard")
def index_impl(self, ctx, experiment=None):
"""Return {runName: {tagName: {displayName: ..., description:
...}}}."""
mapping = self._data_provider.list_scalars(
ctx, experiment_id=experiment, plugin_name=metadata.PLUGIN_NAME,
)
result = {run: {} for run in mapping}
for (run, tag_to_content) in six.iteritems(mapping):
for (tag, metadatum) in six.iteritems(tag_to_content):
description = plugin_util.markdown_to_safe_html(
metadatum.description
)
result[run][tag] = {
"displayName": metadatum.display_name,
"description": description,
}
return result
def scalars_impl(self, ctx, tag, run, experiment, output_format):
"""Result of the form `(body, mime_type)`."""
all_scalars = self._data_provider.read_scalars(
ctx,
experiment_id=experiment,
plugin_name=metadata.PLUGIN_NAME,
downsample=self._downsample_to,
run_tag_filter=provider.RunTagFilter(runs=[run], tags=[tag]),
)
scalars = all_scalars.get(run, {}).get(tag, None)
if scalars is None:
raise errors.NotFoundError(
"No scalar data for run=%r, tag=%r" % (run, tag)
)
values = [(x.wall_time, x.step, x.value) for x in scalars]
if output_format == OutputFormat.CSV:
string_io = StringIO()
writer = csv.writer(string_io)
writer.writerow(["Wall time", "Step", "Value"])
writer.writerows(values)
return (string_io.getvalue(), "text/csv")
else:
return (values, "application/json")
@wrappers.Request.application
def tags_route(self, request):
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
index = self.index_impl(ctx, experiment=experiment)
return http_util.Respond(request, index, "application/json")
@wrappers.Request.application
def scalars_route(self, request):
"""Given a tag and single run, return array of ScalarEvents."""
tag = request.args.get("tag")
run = request.args.get("run")
ctx = plugin_util.context(request.environ)
experiment = plugin_util.experiment_id(request.environ)
output_format = request.args.get("format")
(body, mime_type) = self.scalars_impl(
ctx, tag, run, experiment, output_format
)
return http_util.Respond(request, body, mime_type)
| {"hexsha": "fc1262d2e00618c8f06ef4ca02d3e3ab4748a18c", "size": 4997, "ext": "py", "lang": "Python", "max_stars_repo_path": "Lib/site-packages/tensorboard/plugins/scalar/scalars_plugin.py", "max_stars_repo_name": "caiyongji/tf2.3.1-py3.7.9-full-built", "max_stars_repo_head_hexsha": "ace4efcbf05b2b494388739718a18c13eab83c71", "max_stars_repo_licenses": ["CNRI-Python-GPL-Compatible"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-09-02T16:13:51.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-05T08:45:59.000Z", "max_issues_repo_path": "Lib/site-packages/tensorboard/plugins/scalar/scalars_plugin.py", "max_issues_repo_name": "caiyongji/tf2.3.1-py3.7.9-full-built", "max_issues_repo_head_hexsha": "ace4efcbf05b2b494388739718a18c13eab83c71", "max_issues_repo_licenses": ["CNRI-Python-GPL-Compatible"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Lib/site-packages/tensorboard/plugins/scalar/scalars_plugin.py", "max_forks_repo_name": "caiyongji/tf2.3.1-py3.7.9-full-built", "max_forks_repo_head_hexsha": "ace4efcbf05b2b494388739718a18c13eab83c71", "max_forks_repo_licenses": ["CNRI-Python-GPL-Compatible"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-09-17T13:16:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-17T13:16:30.000Z", "avg_line_length": 35.4397163121, "max_line_length": 80, "alphanum_fraction": 0.6495897539, "include": true, "reason": "import numpy", "num_tokens": 1052} |
c
c * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c * *
c * copyright (c) 1998 by UCAR *
c * *
c * University Corporation for Atmospheric Research *
c * *
c * all rights reserved *
c * *
c * SPHEREPACK version 3.2 *
c * *
c * A Package of Fortran77 Subroutines and Programs *
c * *
c * for Modeling Geophysical Processes *
c * *
c * by *
c * *
c * John Adams and Paul Swarztrauber *
c * *
c * of *
c * *
c * the National Center for Atmospheric Research *
c * *
c * Boulder, Colorado (80307) U.S.A. *
c * *
c * which is sponsored by *
c * *
c * the National Science Foundation *
c * *
c * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
c
c
c ... file trvsph.f
c
c this file contains documentation and code for subroutine trvsph
c
c ... required files
c
c sphcom.f, hrfft.f, gaqd.f, vhaec.f, vhsec.f, vhagc.f, vhsgc.f
c
c subroutine trvsph (intl,igrida,nlona,nlata,iveca,ua,va,
c +igridb,nlonb,nlatb,ivecb,ub,vb,wsave,lsave,lsvmin,work,
c +lwork,lwkmin,dwork,ldwork,ier)
c
c *** author
c
c John C. Adams (NCAR 1997), email: johnad@ncar.ucar.edu
c
c *** purpose
c
c subroutine trvsph transfers vector data given in (ua,va) on a grid on
c the full sphere to vector data in (ub,vb) on a grid on the full sphere.
c the grids on which (ua,va) is given and (ub,vb) is generated can be
c specified independently of each other (see the input arguments igrida,
c igridb,iveca,ivecb). ua and ub are the east longitudinal components of
c the given and transformed vector fields. va is either the latitudinal
c or colatitudinal component of the given vector field (see iveca).
c vb is either the latitudinal or colatitudinal component of the
c transformed vector field (see ivecb). for transferring scalar data
c on the sphere, use subroutine trssph.
c
c * notice that scalar and vector quantities are fundamentally different
c on the sphere. for example, vectors are discontinuous and multiple
c valued at the poles. scalars are continuous and single valued at the
c poles. erroneous results would be produced if one attempted to transfer
c vector fields between grids with subroutine trssph applied to each
c component of the vector.
c
c *** underlying grid assumptions and a description
c
c discussions with the ncar scd data support group and others indicate
c there is no standard grid for storing observational or model generated
c data on the sphere. subroutine trvsph was designed to handle most
c cases likely to be encountered when moving data from one grid format
c to another.
c
c the grid on which (ua,va) is given must be equally spaced in longitude
c and either equally spaced or gaussian in latitude (or colatitude).
c longitude, which can be either the first or second dimension of ua,va
c subdivides [0,2pi) excluding the periodic point 2pi. (co)latitude,
c which can be the second or first dimension of ua,va, has south
c to north or north to south orientation with increasing subscript
c value in ua,va (see the argument igrida).
c
c the grid on which ub,vb is generated must be equally spaced in longitude
c and either equally spaced or gaussian in latitude (or colatitude).
c longitude, which can be either the first or second dimension of ub,vb
c subdivides [0,2pi) excluding the periodic point 2pi. (co)latitude,
c which can be the second or first dimension of ub,vb, has south
c to north or north to south orientation with increasing subscript
c     value in ub,vb (see the argument igridb).
c
c     let nlon be either nlona or nlonb (the number of grid points in
c     longitude).  the longitude grid subdivides [0,2pi) into nlon equally
c     spaced points
c
c (j-1)*2.*pi/nlon (j=1,...,nlon).
c
c it is not necessary to communicate to subroutine trvsph whether the
c underlying grids are in latitude or colatitude. it is only necessary
c to communicate whether they run south to north or north to south with
c increasing subscripts. a brief discussion of latitude and colatitude
c follows. equally spaced latitude grids are assumed to subdivide
c [-pi/2,pi/2] with the south pole at -pi/2 and north pole at pi/2.
c equally spaced colatitude grids subdivide [0,pi] with the north pole
c at 0 and south pole at pi. equally spaced partitions on the sphere
c include both poles. gaussian latitude grids subdivide (-pi/2,pi/2)
c and gaussian colatitude grids subdivide (0,pi). gaussian grids do not
c include the poles. the gaussian grid points are uniquely determined by
c the size of the partition. they can be computed in colatitude in
c (0,pi) (north to south) in double precision by the spherepack subroutine
c gaqd. let nlat be nlata or nlatb if either the ua,va or ub,vb grid is
c gaussian. let
c
c north pole south pole
c ---------- ----------
c 0.0 < cth(1) < ... < cth(nlat) < pi
c
c
c be nlat gaussian colatitude points in the interval (0,pi) and let
c
c south pole north pole
c ---------- ----------
c -pi/2 < th(1) < ... < th(nlat) < pi/2
c
c be nlat gaussian latitude points in the open interval (-pi/2,pi/2).
c these are related by
c
c th(i) = -pi/2 + cth(i) (i=1,...,nlat)
c
c if the (ua,va) or (ub,vb) grid is equally spaced in (co)latitude then
c
c ctht(i) = (i-1)*pi/(nlat-1)
c (i=1,...,nlat)
c tht(i) = -pi/2 + (i-1)*pi/(nlat-1)
c
c define the equally spaced (north to south) colatitude and (south to
c north) latitude grids.
c
c *** method (simplified description)
c
c (1)
c
c     the vector field (ua,va) is reformatted to a vector field in mathematical
c spherical coordinates using array transpositions, subscript reordering
c and negation of va as necessary (see arguments igrida,iveca).
c
c (2)
c
c a vector harmonic analysis is performed on the result from (1)
c
c (3)
c
c a vector harmonic synthesis is performed on the (ub,vb) grid
c     using as many coefficients from (2) as possible (i.e., as is
c     consistent with the size of the ub,vb grid).
c
c (4)
c
c the vector field generated in (3) is transformed from mathematical
c spherical coordinates to the form flagged by ivecb and igridb in
c (ub,vb) using array transpositions, subscript reordering and negation
c as necessary
c
c
c *** advantages
c
c the use of vector spherical harmonics to transfer vector data is
c highly accurate and preserves properties of vectors on the sphere.
c the method produces a weighted least squares fit to vector data in
c which waves are resolved uniformly on the full sphere. high frequencies
c induced by closeness of grid points near the poles (due to computational
c or observational errors) are smoothed. the method is consistent with
c methods used to generate vector data in numerical spectral models based
c on spherical harmonics. for more discussion of these and related issues,
c see "on the spectral approximation of discrete scalar and vector
c functions on the sphere," siam j. numer. anal., vol. 16, december 1979,
c pp. 934-949, by paul swarztrauber.
c
c
c *** comment
c
c on a nlon by nlat or nlat by nlon grid (gaussian or equally spaced)
c spherical harmonic analysis generates and synthesis utilizes
c     min0(nlat,(nlon+2)/2) by nlat coefficients. consequently, for
c ua,va and ub,vb, if either
c
c min0(nlatb,(nlonb+2)/2) < min0(nlata,(nlona+2)/2)
c
c or if
c
c nlatb < nlata
c
c then all the coefficients generated by an analysis of ua,va cannot be
c used in the synthesis which generates ub,vb. in this case "information"
c can be lost in generating ub,vb. more precisely, information will be
c lost if the analysis of ua,va yields nonzero coefficients which are
c outside the coefficient bounds determined by the ub,vb grid. still
c transference with vector spherical harmonics will yield results
c consistent with grid resolution and is highly accurate.
c
c *** input arguments
c
c ... intl
c
c an initialization argument which should be zero on an initial call to
c trvsph. intl should be one if trvsph is being recalled and
c
c igrida,nlona,nlata,iveca,igridb,nlonb,nlatb,ivecb
c
c have not changed from the previous call. if any of these arguments have
c changed intl=0 must be used to avoid undetectable errors. when allowed,
c calls with intl=1 bypass redundant computation and save time. it can
c be used when transferring multiple vector data sets with the same
c underlying grids.
c
c ... igrida
c
c an integer vector dimensioned two which identifies the underlying grid
c on the full sphere for the given vector data (ua,va) as follows:
c
c igrida(1)
c
c = -1
c if the latitude (or colatitude) grid for ua,va is an equally spaced
c partition of [-pi/2,pi/2] ( or [0,pi] ) including the poles which
c runs north to south with increasing subscript value
c
c = +1
c if the latitude (or colatitude) grid for ua,va is an equally spaced
c partition of [-pi/2,pi/2] ( or [0,pi] ) including the poles which
c runs south to north with increasing subscript value
c
c = -2
c if the latitude (or colatitude) grid for ua,va is a gaussian partition
c of (-pi/2,pi/2) ( or (0,pi) ) excluding the poles which runs north
c to south with increasing subscript value
c
c = +2
c if the latitude (or colatitude) grid for ua,va is a gaussian partition
c     of (-pi/2,pi/2) ( or (0,pi) ) excluding the poles which runs south
c     to north with increasing subscript value
c
c igrida(2)
c
c = 0 if the underlying grid for ua,va is a nlona by nlata
c
c = 1 if the underlying grid for ua,va is a nlata by nlona
c
c
c ... nlona
c
c the number of longitude points on the uniform grid which partitions
c [0,2pi) for the given vector (ua,va). nlona is also the first or second
c dimension of ua,va (see igrida(2)) in the program which calls trvsph.
c nlona determines the grid increment in longitude as 2*pi/nlona. for
c example nlona = 72 for a five degree grid. nlona must be greater than
c or equal to 4. the efficiency of the computation is improved when
c nlona is a product of small prime numbers
c
c ... nlata
c
c the number of points in the latitude (or colatitude) grid for the
c given vector (ua,va). nlata is also the first or second dimension
c of ua and va (see igrida(2)) in the program which calls trvsph.
c if nlata is odd then the equator will be located at the (nlata+1)/2
c gaussian grid point. if nlata is even then the equator will be
c located half way between the nlata/2 and nlata/2+1 grid points.
c
c ... iveca
c
c if iveca=0 is input then va is the latitudinal component of the
c given vector field. if iveca=1 then va is the colatitudinal
c     component of the given vector field. in either case, ua must
c be the east longitudinal component of the given vector field.
c
c *** note:
c igrida(1)=-1 or igrida(1)=-2, igrida(2)=1, and iveca=1 corresponds
c to the "usual" mathematical spherical coordinate system required
c by most of the drivers in spherepack2. igrida(1)=1 or igrida(1)=2,
c igrida(2)=0, and iveca=0 corresponds to the "usual" geophysical
c spherical coordinate system.
c
c
c ... ua
c
c ua is the east longitudinal component of the given vector field.
c ua must be dimensioned nlona by nlata in the program calling trvsph if
c igrida(2) = 0. ua must be dimensioned nlata by nlona in the program
c calling trvsph if igrida(2) = 1. if ua is not properly dimensioned
c and if the latitude (colatitude) values do not run south to north or
c north to south as flagged by igrida(1) (this cannot be checked!) then
c incorrect results will be produced.
c
c
c ... va
c
c     va is either the latitudinal or colatitudinal component of the
c given vector field (see iveca). va must be dimensioned nlona by
c nlata in the program calling trvsph if igrida(2)=0. va must be
c dimensioned nlata by nlona in the program calling trvsph if
c igrida(2)=1. if va is not properly dimensioned or if the latitude
c (colatitude) values do not run south to north or north to south
c as flagged by igrida(1) (this cannot be checked!) then incorrect
c results will be produced.
c
c ... igridb
c
c an integer vector dimensioned two which identifies the underlying grid
c on the full sphere for the transformed vector (ub,vb) as follows:
c
c igridb(1)
c
c = -1
c if the latitude (or colatitude) grid for ub,vb is an equally spaced
c     partition of [-pi/2,pi/2] ( or [0,pi] ) including the poles which runs
c     north to south
c
c = +1
c if the latitude (or colatitude) grid for ub,vb is an equally spaced
c     partition of [-pi/2,pi/2] ( or [0,pi] ) including the poles which runs
c     south to north
c
c = -2
c if the latitude (or colatitude) grid for ub,vb is a gaussian partition
c of (-pi/2,pi/2) ( or (0,pi) ) excluding the poles which runs north to
c south
c
c = +2
c if the latitude (or colatitude) grid for ub,vb is a gaussian partition
c of (-pi/2,pi/2) ( or (0,pi) ) excluding the poles which runs south to
c north
c
c igridb(2)
c
c = 0 if the underlying grid for ub,vb is a nlonb by nlatb
c
c = 1 if the underlying grid for ub,vb is a nlatb by nlonb
c
c
c ... nlonb
c
c the number of longitude points on the uniform grid which partitions
c [0,2pi) for the transformed vector (ub,vb). nlonb is also the first or
c second dimension of ub and vb (see igridb(2)) in the program which calls
c trvsph. nlonb determines the grid increment in longitude as 2*pi/nlonb.
c for example nlonb = 72 for a five degree grid. nlonb must be greater
c than or equal to 4. the efficiency of the computation is improved when
c nlonb is a product of small prime numbers
c
c ... nlatb
c
c the number of points in the latitude (or colatitude) grid for the
c transformed vector (ub,vb). nlatb is also the first or second dimension
c of ub and vb (see igridb(2)) in the program which calls trvsph.
c if nlatb is odd then the equator will be located at the (nlatb+1)/2
c gaussian grid point. if nlatb is even then the equator will be
c located half way between the nlatb/2 and nlatb/2+1 grid points.
c
c ... ivecb
c
c if ivecb=0 is input then vb is the latitudinal component of the
c given vector field. if ivecb=1 then vb is the colatitudinal
c     component of the given vector field. in either case, ub must
c be the east longitudinal component of the given vector field.
c
c *** note:
c igridb(1)=-1 or igridb(1)=-2, igridb(2)=1, and ivecb=1 corresponds
c to the "usual" mathematical spherical coordinate system required
c by most of the drivers in spherepack2. igridb(1)=1 or igridb(1)=2,
c igridb(2)=0, and ivecb=0 corresponds to the "usual" geophysical
c spherical coordinate system.
c
c ... wsave
c
c a saved work space array that can be utilized repeatedly by trvsph
c as long as the arguments nlata,nlona,nlatb,nlonb remain unchanged.
c     wsave is set by an intl=0 call to trvsph. wsave must not be altered
c when trvsph is being recalled with intl=1.
c
c ... lsave
c
c the dimension of the work space wsave as it appears in the program
c that calls trvsph. the minimum required value of lsave for the
c current set of input arguments is set in the output argument lsvmin.
c it can be determined by calling trvsph with lsave=0 and printing lsvmin.
c
c la1 = min0(nlata,(nlona+1)/2), la2 = (nlata+1)/2
c
c lb1 = min0(nlatb,(nlonb+1)/2), lb2 = (nlatb+1)/2
c
c lwa = 4*nlata*la2+3*max0(la1-2,0)*(2*nlata-la1-1)+la2+nlona+15
c
c lwb = 4*nlatb*lb2+3*max0(lb1-2,0)*(2*nlatb-lb1-1)+nlonb+15
c
c then
c
c lsvmin = lwa + lwb
c
c is the minimal required work space length of wsave
c
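c     an illustrative check of these formulas (added, not part of the
c     original documentation): with nlata=nlatb=46 and nlona=nlonb=72,
c     la1 = lb1 = 36 and la2 = lb2 = 23, giving
c
c     lwa = 4*46*23 + 3*34*55 + 23 + 72 + 15 = 9952
c
c     lwb = 4*46*23 + 3*34*55 + 72 + 15 = 9929
c
c     so that lsvmin = lwa + lwb = 19881
c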
c
c ... work
c
c a work array that does not have to be preserved
c
c ... lwork
c
c the dimension of the array work as it appears in the program that
c calls trvsph. the minimum required value of lwork for the current
c set of input arguments is set in the output argument lwkmin.
c it can be determined by calling trvsph with lwork=0 and printing
c lwkmin. an estimate for lwork follows. let nlat = max0(nlata,nlatb),
c     nlon = max0(nlona,nlonb) and l1 = min0(nlat,(nlon+2)/2). with these
c     definitions, the quantity
c
c 2*nlat*(8*l1 + 4*nlon + 3)
c
c will suffice as a length for the unsaved work space. this formula
c may overestimate the required minimum value for lwork. the exact
c     minimum value can be predetermined by calling trvsph with lwork=0
c and printout of lwkmin.
c
c ... dwork
c
c a double precision work array that does not have to be preserved.
c
c ... ldwork
c
c the length of dwork in the routine calling trvsph
c Let
c
c nlat = max0(nlata,nlatb)
c
c ldwork must be at least 2*nlat*(nlat+1)+1
c
c
c *** output arguments
c
c
c ... ub
c
c a two dimensional array that contains the east longitudinal component
c of the transformed vector data. ub
c must be dimensioned nlonb by nlatb in the program calling trvsph if
c igridb(2)=0. ub must be dimensioned nlatb by nlonb in the program
c calling trvsph if igridb(2)=1. if ub is not properly dimensioned
c and if the latitude (colatitude) values do not run south to north or
c     north to south as flagged by igridb(1) (this cannot be checked!) then
c incorrect results will be produced.
c
c
c ... vb
c
c a two dimensional array that contains the latitudinal or colatitudinal
c component of the transformed vector data (see ivecb).
c vb must be dimensioned nlonb by nlatb in the program calling trvsph if
c igridb(2)=0. vb must be dimensioned nlatb by nlonb in the program
c calling trvsph if igridb(2)=1. if vb is not properly dimensioned
c and if the latitude (colatitude) values do not run south to north or
c     north to south as flagged by igridb(1) (this cannot be checked!) then
c incorrect results will be produced.
c
c ... lsvmin
c
c the minimum length of the saved work space in wsave.
c     lsvmin is computed even if lsave < lsvmin (ier = 12).
c
c ... lwkmin
c
c the minimum length of the unsaved work space in work.
c     lwkmin is computed even if lwork < lwkmin (ier = 13).
c
c
c *** error argument
c
c ... ier = 0 if no errors are detected
c
c = 1 if intl is not 0 or 1
c
c = 2 if igrida(1) is not -1 or +1 or -2 or +2
c
c = 3 if igrida(2) is not 0 or 1
c
c = 4 if nlona is less than 4
c
c = 5 if nlata is less than 3
c
c = 6 if iveca is not 0 or 1
c
c = 7 if igridb(1) is not -1 or +1 or -2 or +2
c
c = 8 if igridb(2) is not 0 or 1
c
c = 9 if nlonb is less than 4
c
c =10 if nlatb is less than 3
c
c =11 if ivecb is not 0 or 1
c
c =12 if there is insufficient saved work space (lsave < lsvmin)
c
c =13 if there is insufficient unsaved work space (lwork < lwkmin)
c
c =14 indicates failure in an eigenvalue routine which computes
c gaussian weights and points
c
c =15 if ldwork is too small (insufficient double precision
c unsaved work space)
c
c *****************************************************
c *****************************************************
c
c end of argument description ... code follows
c
c *****************************************************
c *****************************************************
c
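c     an illustrative calling sequence (added here, not part of the
c     original documentation; array names are placeholders):
c
c        call trvsph(0,igrida,nlona,nlata,iveca,ua,va,igridb,nlonb,
c    +               nlatb,ivecb,ub,vb,wsave,lsave,lsvmin,work,lwork,
c    +               lwkmin,dwork,ldwork,ier)
c
c     subsequent data sets on the same pair of grids can reuse wsave by
c     recalling trvsph with intl=1 in place of the leading 0.
c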
subroutine trvsph (intl,igrida,nlona,nlata,iveca,ua,va,
+igridb,nlonb,nlatb,ivecb,ub,vb,wsave,lsave,lsvmin,work,
+lwork,lwkmin,dwork,ldwork,ier)
implicit none
integer intl,igrida(2),nlona,nlata,igridb(2),nlonb,nlatb
integer iveca,ivecb,lsave,lsvmin,lwork,lwkmin,ldwork,ier
real ua(*),va(*),ub(*),vb(*),wsave(*),work(*)
double precision dwork(*)
integer ig,igrda,igrdb,la1,la2,lb1,lb2,lwa,lwb
integer iabr,iabi,iacr,iaci,ibbr,ibbi,ibcr,ibci
integer nlat,lwk1,lwk2,lw,iw,jb,nt,ityp
c
c include a save statement to ensure local variables in trvsph, set during
c an intl=0 call, are preserved if trvsph is recalled with intl=1
c
save
c
c check input arguments
c
ier = 1
if (intl*(intl-1).ne.0) return
ier = 2
ig = igrida(1)
if ((ig-1)*(ig+1)*(ig-2)*(ig+2).ne.0) return
ier = 3
ig = igrida(2)
if (ig*(ig-1).ne.0) return
ier = 4
if (nlona .lt. 4) return
ier = 5
if (nlata .lt.3) return
ier = 6
if (iveca*(iveca-1).ne.0) return
ier = 7
ig = igridb(1)
if ((ig-1)*(ig+1)*(ig-2)*(ig+2).ne.0) return
ier = 8
ig = igridb(2)
if (ig*(ig-1).ne.0) return
ier = 9
if (nlonb .lt.4) return
ier = 10
if (nlatb .lt.3) return
ier = 11
if (ivecb*(ivecb-1).ne.0) return
ier = 0
igrda = iabs(igrida(1))
igrdb = iabs(igridb(1))
if (intl.eq.0) then
la1 = min0(nlata,(nlona+1)/2)
la2 = (nlata+1)/2
lb1 = min0(nlatb,(nlonb+1)/2)
lb2 = (nlatb+1)/2
c
c saved space for analysis on a grid
c
lwa = 4*nlata*la2+3*max0(la1-2,0)*(2*nlata-la1-1)+la2+nlona+15
c
c set saved work space length for synthesis on b grid
c
lwb = 4*nlatb*lb2+3*max0(lb1-2,0)*(2*nlatb-lb1-1)+nlonb+15
c
c set minimum required saved work space length
c
lsvmin = lwa + lwb
c
c set wsave pointer
c
jb = 1+lwa
c
c set pointers for vector spherical harmonic coefs in work
c
iabr = 1
iabi = iabr + la1*nlata
iacr = iabi + la1*nlata
iaci = iacr + la1*nlata
ibbr = iaci + la1*nlata
ibbi = ibbr + lb1*nlatb
ibcr = ibbi + lb1*nlatb
ibci = ibcr + lb1*nlatb
c
c set pointers for remaining work
c
iw = ibci + lb1*nlatb
c
c set remaining work space length in lw
c
lw = lwork - iw
c
c compute unsaved space for analysis and synthesis
c
lwk1 = 2*nlata*(2*nlona+max0(6*la2,nlona))
lwk2 = 2*nlatb*(2*nlonb+max0(6*lb2,nlonb))
c
c set minimum unsaved work space required by trvsph
c
lwkmin = iw + max0(lwk1,lwk2)
c
c set error flags if saved or unsaved work space is insufficient
c
ier = 12
if (lsave .lt. lsvmin) return
ier = 13
if (lwork .lt. lwkmin) return
ier = 15
nlat = max0(nlata,nlatb)
if (ldwork .lt. 2*nlat*(nlat+1)+1) return
ier = 0
if (igrda .eq. 1) then
c
c initialize wsave for equally spaced analysis
c
call vhaeci(nlata,nlona,wsave,lwa,dwork,ldwork,ier)
else
c
c initialize wsave for gaussian analysis
c
call vhagci(nlata,nlona,wsave,lwa,dwork,ldwork,ier)
if (ier.ne.0) then
c
c flag failure in spherepack gaussian software
c
ier = 14
return
end if
end if
if (igrdb .eq. 2) then
c
c initialize wsave for gaussian synthesis
c
call vhsgci(nlatb,nlonb,wsave(jb),lwb,dwork,ldwork,ier)
if (ier.ne.0) then
c
c flag failure in spherepack gaussian software
c
ier = 14
return
end if
else
c
c initialize wsave for equally spaced synthesis
c
call vhseci(nlatb,nlonb,wsave(jb),lwb,dwork,ldwork,ier)
end if
c
c end of initialization (intl=0) call
c
end if
c
c convert the vector field (ua,va) to mathematical spherical coordinates
c
if (igrida(2).eq.0) then
call trvplat(nlona,nlata,ua,work)
call trvplat(nlona,nlata,va,work)
end if
if (igrida(1) .gt. 0) then
call covlat(nlata,nlona,ua)
call covlat(nlata,nlona,va)
end if
if (iveca .eq. 0) then
call negv(nlata,nlona,va)
end if
nt = 1
ityp = 0
c
c analyze vector field
c
if (igrda .eq. 2) then
call vhagc(nlata,nlona,ityp,nt,va,ua,nlata,nlona,work(iabr),
+ work(iabi),work(iacr),work(iaci),la1,nlata,wsave,lwa,work(iw),
+ lw,ier)
else
call vhaec(nlata,nlona,ityp,nt,va,ua,nlata,nlona,work(iabr),
+ work(iabi),work(iacr),work(iaci),la1,nlata,wsave,lwa,work(iw),
+ lw,ier)
end if
c
c transfer a grid coefficients to b grid coefficients
c
call trvab(la1,nlata,work(iabr),work(iabi),work(iacr),work(iaci),
+ lb1,nlatb,work(ibbr),work(ibbi),work(ibcr),work(ibci))
c
c synthesize on b grid
c
if (igrdb .eq. 1) then
call vhsec(nlatb,nlonb,ityp,nt,vb,ub,nlatb,nlonb,work(ibbr),
+work(ibbi),work(ibcr),work(ibci),lb1,nlatb,wsave(jb),lwb,
+work(iw),lw,ier)
else
call vhsgc(nlatb,nlonb,ityp,nt,vb,ub,nlatb,nlonb,work(ibbr),
+work(ibbi),work(ibcr),work(ibci),lb1,nlatb,wsave(jb),lwb,work(iw),
+lw,ier)
end if
c
c restore a grid and b grid vector fields (now in math coordinates) to
c agree with grid flags in igrida,iveca,igridb,ivecb
c
if (iveca .eq. 0) then
call negv(nlata,nlona,va)
end if
if (ivecb .eq. 0) then
call negv(nlatb,nlonb,vb)
end if
if (igrida(1).gt. 0) then
call covlat(nlata,nlona,ua)
call covlat(nlata,nlona,va)
end if
if (igridb(1) .gt. 0) then
call covlat(nlatb,nlonb,ub)
call covlat(nlatb,nlonb,vb)
end if
if (igrida(2) .eq. 0) then
call trvplat(nlata,nlona,ua,work)
call trvplat(nlata,nlona,va,work)
end if
if (igridb(2) .eq. 0) then
call trvplat(nlatb,nlonb,ub,work)
call trvplat(nlatb,nlonb,vb,work)
end if
return
end
subroutine negv(nlat,nlon,v)
c
c     negate (co)latitudinal vector component
c
implicit none
integer nlat,nlon,i,j
real v(nlat,nlon)
do j=1,nlon
do i=1,nlat
v(i,j) = -v(i,j)
end do
end do
return
end
subroutine trvab(ma,na,abr,abi,acr,aci,mb,nb,bbr,bbi,bcr,bci)
implicit none
integer ma,na,mb,nb,i,j,m,n
real abr(ma,na),abi(ma,na),acr(ma,na),aci(ma,na)
real bbr(mb,nb),bbi(mb,nb),bcr(mb,nb),bci(mb,nb)
c
c set coefficients for b grid from coefficients for a grid
c
m = min0(ma,mb)
n = min0(na,nb)
do j=1,n
do i=1,m
bbr(i,j) = abr(i,j)
bbi(i,j) = abi(i,j)
bcr(i,j) = acr(i,j)
bci(i,j) = aci(i,j)
end do
end do
c
c set coefs outside triangle to zero
c
do i=m+1,mb
do j=1,nb
bbr(i,j) = 0.0
bbi(i,j) = 0.0
bcr(i,j) = 0.0
bci(i,j) = 0.0
end do
end do
do j=n+1,nb
do i=1,mb
bbr(i,j) = 0.0
bbi(i,j) = 0.0
bcr(i,j) = 0.0
bci(i,j) = 0.0
end do
end do
return
end
subroutine trvplat(n,m,data,work)
c
c     transpose the n by m array data to an m by n array data
c work must be at least n*m words long
c
implicit none
integer n,m,i,j,ij,ji
real data(*),work(*)
do j=1,m
do i=1,n
ij = (j-1)*n+i
work(ij) = data(ij)
end do
end do
do i=1,n
do j=1,m
ji = (i-1)*m+j
ij = (j-1)*n+i
data(ji) = work(ij)
end do
end do
return
end
subroutine covlat(nlat,nlon,data)
c
c reverse order of latitude (colatitude) grids
c
implicit none
integer nlat,nlon,nlat2,i,ib,j
real data(nlat,nlon),temp
nlat2 = nlat/2
do i=1,nlat2
ib = nlat-i+1
do j=1,nlon
temp = data(i,j)
data(i,j) = data(ib,j)
data(ib,j) = temp
end do
end do
return
end
| {"hexsha": "91cea58f3afcec4c1365c8f9b0b992462f386b8a", "size": 29231, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "install/spherepack3.2/src/trvsph.f", "max_stars_repo_name": "comp-physics/RBC3D", "max_stars_repo_head_hexsha": "2891a1e3b356e2cff0cfa74ba26d4aef3336641d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "install/spherepack3.2/src/trvsph.f", "max_issues_repo_name": "comp-physics/RBC3D", "max_issues_repo_head_hexsha": "2891a1e3b356e2cff0cfa74ba26d4aef3336641d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "install/spherepack3.2/src/trvsph.f", "max_forks_repo_name": "comp-physics/RBC3D", "max_forks_repo_head_hexsha": "2891a1e3b356e2cff0cfa74ba26d4aef3336641d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.0071856287, "max_line_length": 79, "alphanum_fraction": 0.6224898224, "num_tokens": 9119} |
import numpy as np
import cv2
from ellipse import Ellipse
class Noise:
def __init__(self, min_area):
self.min_area = min_area
self.pixel_remove = 255
self.new_pixel = 0
self.ellipse = Ellipse()
def treatment_noise(self, frame, contours):
if frame is None:
raise Exception("Frame is none!")
new_frame = np.copy(frame)
center, _ = self.ellipse.search_ellipse(frame, contours)
if frame[center[1], center[0]] == self.pixel_remove:
new_frame = self.remove_false_center(frame, center)
return new_frame
    def remove_false_center(self, frame, center):
        # center arrives as (x, y) = (column, row), matching the
        # frame[center[1], center[0]] lookup in treatment_noise above.
        new_frame = np.copy(frame)
        i, j = center  # i = column index (x), j = row index (y)
        lin, col = frame.shape
        new_frame[j, i] = self.new_pixel
        validate = True
        increment = 0
        while validate:
            increment += 1
            # stop before stepping outside the image in any direction
            if (j + increment >= lin or i + increment >= col
                    or j - increment < 0 or i - increment < 0):
                break
            # clear a growing cross centred on the false detection
            new_frame[j, i+increment] = new_frame[j, i-increment] = self.new_pixel
            new_frame[j+increment, i] = new_frame[j-increment, i] = self.new_pixel
            validate = (new_frame[j, i+2] == self.pixel_remove or new_frame[j, i-2] == self.pixel_remove) or (
                new_frame[j+2, i] == self.pixel_remove or new_frame[j-2, i] == self.pixel_remove)
        return new_frame
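# Illustrative usage (an assumption; `frame` is expected to be a binarized
# single-channel image and `contours` the output of cv2.findContours):
#   noise = Noise(min_area=30)
#   contours, _ = cv2.findContours(frame, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
#   cleaned = noise.treatment_noise(frame, contours)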
| {"hexsha": "0d5bbe3924d1c637a4ba8eb19108d297b3b0b749", "size": 1376, "ext": "py", "lang": "Python", "max_stars_repo_path": "pupilometer/noise.py", "max_stars_repo_name": "marcosvsilva/ProjectPupilometer", "max_stars_repo_head_hexsha": "dadacabdce2c4702de6bafba18b47220e38905b4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pupilometer/noise.py", "max_issues_repo_name": "marcosvsilva/ProjectPupilometer", "max_issues_repo_head_hexsha": "dadacabdce2c4702de6bafba18b47220e38905b4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pupilometer/noise.py", "max_forks_repo_name": "marcosvsilva/ProjectPupilometer", "max_forks_repo_head_hexsha": "dadacabdce2c4702de6bafba18b47220e38905b4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.2765957447, "max_line_length": 110, "alphanum_fraction": 0.59375, "include": true, "reason": "import numpy", "num_tokens": 335} |
import platform
import sys
import os
# The 'sysconfig' module is only available with Python 2.7 and newer, but
# an equivalent module in 'distutils' is available for Python 2.6.
if sys.version < '2.7':
from distutils import sysconfig
else:
import sysconfig
# The 'imp' module is deprecated since Python 3.4, and the use of
# 'importlib' is recommended instead.
if sys.version < '3.4':
import imp
def module_path(name):
if name in sys.builtin_module_names:
return "[builtin module]"
spec = imp.find_module(name)
return spec[1]
else:
from importlib import util
def module_path(name):
if name in sys.builtin_module_names:
return "[builtin module]"
spec = util.find_spec(name)
origin = spec.origin
return origin[:origin.rfind('/')]
# Get appropriate path-entry separator for platform
pathsep = ";" if os.name == "nt" else ":"
# Read default configuration values
config = {
"Architecture" : platform.architecture()[0],
"Version" : str(sys.version).replace("\n", " "),
"VersionNumber" : str(sys.version_info[0]) + "." + str(sys.version_info[1]),
"Prefix" : getattr(sys, "prefix", ""),
"ExecPrefix" : getattr(sys, "exec_prefix", ""),
"BaseExecPrefix" : getattr(sys, "base_exec_prefix", ""),
"BaseExecPrefixLib": getattr(sys, "base_exec_prefix", "") + "/lib",
"PythonPath" : pathsep.join(sys.path[1:]),
"LIBPL" : sysconfig.get_config_var("LIBPL"),
"LIBDIR" : sysconfig.get_config_var("LIBDIR")
}
# Read numpy configuration (if available)
try:
import numpy
config["NumpyPath"] = str(numpy.__path__[0])
config["NumpyVersion"] = str(numpy.__version__)
except:
pass
# Read required module information (if requested)
try:
required_module = os.environ["RETICULATE_REQUIRED_MODULE"]
if required_module is not None and len(required_module) > 0:
config["RequiredModule"] = required_module
config["RequiredModulePath"] = module_path(required_module)
except:
pass
# Write configuration to stdout
lines = [str(key) + ": " + str(val) for (key, val) in config.items()]
text = "\n".join(lines)
sys.stdout.write(text)
| {"hexsha": "48902f8d7df528df0099ed90f334ae3adaaad5be", "size": 2160, "ext": "py", "lang": "Python", "max_stars_repo_path": "inst/config/config.py", "max_stars_repo_name": "pradosj/reticulate", "max_stars_repo_head_hexsha": "66e99d30f21d9479f48e4751d9ca8fcef68e03c0", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "inst/config/config.py", "max_issues_repo_name": "pradosj/reticulate", "max_issues_repo_head_hexsha": "66e99d30f21d9479f48e4751d9ca8fcef68e03c0", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "inst/config/config.py", "max_forks_repo_name": "pradosj/reticulate", "max_forks_repo_head_hexsha": "66e99d30f21d9479f48e4751d9ca8fcef68e03c0", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3043478261, "max_line_length": 81, "alphanum_fraction": 0.6768518519, "include": true, "reason": "import numpy", "num_tokens": 561} |
/*Copyright (c) 2019, Suliman Alsowelim
All rights reserved.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
*/
#include <opencv2/opencv.hpp>
#include "fingerprint.h"
#include <iostream>
#include <algorithm>
#include <vector>
#include <limits>
#include <iterator>
#include <typeinfo>
#include <math.h>
#include <chrono>
#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>
#include <boost/uuid/sha1.hpp>
#include <fstream>
using boost::property_tree::ptree;
using namespace std;
int DEFAULT_FAN_VALUE = 15;
int MIN_HASH_TIME_DELTA = 0;
int MAX_HASH_TIME_DELTA = 200;
int FINGERPRINT_REDUCTION = 20;
int PEAK_NEIGHBORHOOD_SIZE = 20;
float DEFAULT_AMP_MIN = 10;
int DEFAULT_WINDOW_SIZE = 4096;
float DEFAULT_OVERLAP_RATIO = 0.5;
float FS = 44100.0;
std::vector<std::vector<float>> stride_windows(const std::vector<float>& data, size_t blocksize, size_t overlap){
//https://stackoverflow.com/questions/21344296/striding-windows/21345055
std::vector<std::vector<float>> res;
size_t minlen = (data.size() - overlap)/(blocksize - overlap);
auto start = data.begin();
for (size_t i=0; i<blocksize; ++i)
{
res.emplace_back(std::vector<float>());
std::vector<float>& block = res.back();
auto it = start++;
for (size_t j=0; j<minlen; ++j)
{
block.push_back(*it);
std::advance(it,(blocksize-overlap));
}
}
return res;
}
int detrend(std::vector<std::vector<float>>& data){
size_t nocols = data[0].size();
size_t norows = data.size();
float mean = 0;
for (size_t i=0; i<nocols; ++i){
for (size_t j=0; j<norows; ++j){
mean = mean + data[j][i];
}
}
mean = mean/(norows*nocols);
for (size_t i=0; i<nocols; ++i){
for (size_t j=0; j<norows; ++j){
data[j][i] = data[j][i] - mean;
}
}
return 0;
}
std::vector<float> create_window(int wsize){
std::vector<float> res;
float multiplier;
for (int i = 0; i < wsize; i++) {
multiplier = 0.5 - 0.5 *(cos(2.0*M_PI*i/(wsize-1)));
res.emplace_back(multiplier);
}
return res;
}
void apply_window(std::vector<float> &hann_window,std::vector<std::vector<float>>& data){
size_t nocols = data[0].size();
size_t norows = data.size();
for (size_t i=0; i<nocols; ++i){
for (size_t j=0; j<norows; ++j){
data[j][i] = data[j][i] * hann_window[j];
}
}
}
std::string get_sha1(const std::string& p_arg)
{
boost::uuids::detail::sha1 sha1;
sha1.process_bytes(p_arg.data(), p_arg.size());
unsigned hash[5] = {0};
sha1.get_digest(hash);
// Back to string
char buf[41] = {0};
for (int i = 0; i < 5; i++)
{
std::sprintf(buf + (i << 3), "%08x", hash[i]);
}
return std::string(buf);
}
std::string generate_hashes(vector<pair<int,int>> &v_in){
//sorting
//https://stackoverflow.com/questions/279854/how-do-i-sort-a-vector-of-pairs-based-on-the-second-element-of-the-pair
std::sort(v_in.begin(), v_in.end(), [](auto &left, auto &right) {
if (left.second == right.second)
return left.first < right.first;
return left.second < right.second;
});
std::ostringstream buf;
buf << "[";
for(int i=0; i<v_in.size(); i++){
for(int j=1; j<DEFAULT_FAN_VALUE; j++){
if ((i+j) < v_in.size()){
int freq1 = v_in[i].first;
int freq2 = v_in[i+j].first;
int time1 = v_in[i].second;
int time2 = v_in[i+j].second;
int t_delta = time2 - time1;
if ((t_delta >= MIN_HASH_TIME_DELTA) and (t_delta <= MAX_HASH_TIME_DELTA)){
char buffer [100];
snprintf(buffer, sizeof(buffer),"%d|%d|%d", freq1,freq2,t_delta);
std::string to_be_hashed = buffer;
std::string hash_result = get_sha1(to_be_hashed).erase(FINGERPRINT_REDUCTION,40);
ptree pt;
pt.put ("hash", hash_result);
pt.put ("offset", time1);
if(buf.str() != "["){
buf << ",";
}
write_json(buf, pt, false);
}
}
}
}
buf << "]";
return buf.str();
}
vector<pair<int,int>> get_2D_peaks (cv::Mat data){
/* generate binary structure and apply maximum filter*/
cv::Mat tmpkernel = cv::getStructuringElement(cv::MORPH_CROSS,cv::Size(3,3),cv::Point(-1,-1));
cv::Mat kernel = cv::Mat(PEAK_NEIGHBORHOOD_SIZE*2+1,PEAK_NEIGHBORHOOD_SIZE*2+1, CV_8U, uint8_t(0));
kernel.at<uint8_t>(PEAK_NEIGHBORHOOD_SIZE,PEAK_NEIGHBORHOOD_SIZE) = uint8_t(1);
cv::dilate(kernel, kernel, tmpkernel,cv::Point(-1, -1), PEAK_NEIGHBORHOOD_SIZE,1,1);
cv::Mat d1;
cv::dilate(data, d1, kernel);/* d1 now contain m1 with max filter applied */
/* generate eroded background */
cv::Mat background = (data == 0); // 255 if element == 0 , 0 otherwise
cv::Mat local_max = (data == d1); // 255 if true, 0 otherwise
cv::Mat eroded_background;
cv::erode(background, eroded_background, kernel);
cv::Mat detected_peaks = local_max - eroded_background;
/* now detected peaks.size == m1.size .. iterate through m1. get amp where peak == 255 (true), get indices i,j as well.*/
vector<pair<int,int>> freq_time_idx_pairs;
for(int i=0; i<data.rows; ++i){
for(int j=0; j<data.cols; ++j){
if ((detected_peaks.at<uint8_t>(i, j) == 255) and (data.at<float>(i,j) > DEFAULT_AMP_MIN)) {
freq_time_idx_pairs.push_back(std::make_pair(i,j));
}
}
}
return freq_time_idx_pairs;
}
void max_filter(std::vector<std::vector<float>>& data){
//https://gist.github.com/otmb/014107e7b6c6d6a79f0ac1ccc456580a
cv::Mat m1(data.size(), data.at(0).size(), CV_32F);
for(int i=0; i<m1.rows; ++i)
for(int j=0; j<m1.cols; ++j)
m1.at<float>(i, j) = data.at(i).at(j);
/* generate binary structure and apply maximum filter*/
cv::Mat tmpkernel = cv::getStructuringElement(cv::MORPH_CROSS,cv::Size(3,3),cv::Point(-1,-1));
cv::Mat kernel = cv::Mat(PEAK_NEIGHBORHOOD_SIZE*2+1,PEAK_NEIGHBORHOOD_SIZE*2+1, CV_8U, uint8_t(0));
kernel.at<uint8_t>(PEAK_NEIGHBORHOOD_SIZE,PEAK_NEIGHBORHOOD_SIZE) = uint8_t(1);
cv::dilate(kernel, kernel, tmpkernel,cv::Point(-1, -1), PEAK_NEIGHBORHOOD_SIZE,1,1);
cv::Mat d1;
cv::dilate(m1, d1, kernel);
/* d1 now contain m1 with max filter applied */
/* generate eroded background */
cv::Mat background = (m1 == 0);
cv::Mat local_max = (m1 == d1);
cv::Mat eroded_background;
cv::erode(background, eroded_background, kernel);
cv::Mat detected_peaks = local_max - eroded_background;
vector<pair<int,int>> freq_time_idx_pairs;
for(int i=0; i<m1.rows; ++i){
for(int j=0; j<m1.cols; ++j){
if ((detected_peaks.at<uint8_t>(i, j) == 255) and (m1.at<float>(i,j) > DEFAULT_AMP_MIN)) {
freq_time_idx_pairs.push_back(std::make_pair(i,j));
}
}
}
}
std::string fingerprint (float * data, int data_size){
std::vector<float> vec(&data[0], data + data_size);
// see mlab.py on how to decide number of frequencies
int max_freq = 0; //onesided
if (DEFAULT_WINDOW_SIZE % 2 == 0){
max_freq = int(std::floor(DEFAULT_WINDOW_SIZE / 2)) + 1;
}else{
max_freq = int(std::floor((DEFAULT_WINDOW_SIZE+1) / 2));
}
std::vector<std::vector<float>> blocks = stride_windows(vec, DEFAULT_WINDOW_SIZE, DEFAULT_WINDOW_SIZE*DEFAULT_OVERLAP_RATIO);
std::vector<float> hann_window = create_window(DEFAULT_WINDOW_SIZE);
apply_window(hann_window,blocks);
cv::Mat dst(blocks[0].size(),blocks.size(), CV_32F);
for(int i=0; i<dst.rows; ++i)
for(int j=0; j<dst.cols; ++j){
dst.at<float>(i, j) = blocks[j][i];
}
cv::dft(dst,dst,cv::DftFlags::DFT_COMPLEX_OUTPUT+cv::DftFlags::DFT_ROWS,0);
cv::mulSpectrums(dst,dst,dst,0,true);
cv::Mat dst2(max_freq,blocks.at(0).size(), CV_32F);
for(int i=0; i<max_freq; ++i)
for(int j=0; j<dst2.cols; ++j){
dst2.at<float>(i, j) = dst.ptr<float>(j)[2*i];
}
for(int i=1; i<dst2.rows -1; ++i)
for(int j=0; j<dst2.cols; ++j)
dst2.at<float>(i, j) = dst2.at<float>(i, j)*2;
dst2 = dst2 * (1.0/FS);
float sum = 0.0;
float tmp = 0.0;
for(unsigned int i = 0; i < hann_window.size(); i++){
if(hann_window[i] < 0)
tmp = hann_window[i]* -1;
else
tmp = hann_window[i];
sum = sum + (tmp*tmp);
}
dst2 = dst2 * (1.0/sum);
//see https://github.com/worldveil/dejavu/issues/118
float threshold = 0.00000001;
for(int i=0; i<dst2.rows; ++i){
for(int j=0; j<dst2.cols; ++j){
if ((dst2.at<float>(i, j)) < threshold){
dst2.at<float>(i, j) = threshold;
}
dst2.at<float>(i, j) = 10 * log10(dst2.at<float>(i, j));
}
}
vector<pair<int,int>> v_in = get_2D_peaks(dst2);
std::string json = generate_hashes(v_in);
return json;
}
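/* Illustrative build command (an assumption -- exact flags depend on the local
   OpenCV/Boost installation; the Boost pieces used here are header-only):
     g++ -std=c++14 fingerprint.cpp -o fingerprint `pkg-config --cflags --libs opencv4`
   main() below additionally expects ffmpeg to be available on PATH. */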
int main () {
std::system("ffmpeg -hide_banner -loglevel panic -i test.mp3 -f s16le -acodec pcm_s16le -ss 0 -ac 1 -ar 22050 - > raw_data ");
//https://www.daniweb.com/programming/software-development/threads/128352/read-a-raw-pcm-file-and-then-play-it-with-sound-in-c-or-c
//https://stackoverflow.com/questions/49161854/reading-raw-audio-file
std::fstream f_in;
short speech;
float data[200000];
f_in.open("raw_data", std::ios::in | std::ios::binary);
int i = 0;
  // stop when the fixed-size buffer above (200000 floats) is full
  while (i < 200000) {
f_in.read((char *)&speech, 2);
if (!f_in.good()){
break;
}
data[i] = speech;
i++;
}
f_in.close();
std::string json = fingerprint(data,i);
cout << json << std::endl;
return 0;
}
| {"hexsha": "d2efe859be6e099fcbb6424af1ae30e1ce6723c0", "size": 9418, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "fingerprint.cpp", "max_stars_repo_name": "salsowelim/dejavu_cpp_port", "max_stars_repo_head_hexsha": "70b4307111be4a2481e40e7d58b763ac50a2081b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 35.0, "max_stars_repo_stars_event_min_datetime": "2019-05-13T17:06:24.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-24T07:33:21.000Z", "max_issues_repo_path": "fingerprint.cpp", "max_issues_repo_name": "salsowelim/dejavu_cpp_port", "max_issues_repo_head_hexsha": "70b4307111be4a2481e40e7d58b763ac50a2081b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 3.0, "max_issues_repo_issues_event_min_datetime": "2019-11-06T07:33:19.000Z", "max_issues_repo_issues_event_max_datetime": "2020-07-17T15:37:16.000Z", "max_forks_repo_path": "fingerprint.cpp", "max_forks_repo_name": "salsowelim/dejavu_cpp_port", "max_forks_repo_head_hexsha": "70b4307111be4a2481e40e7d58b763ac50a2081b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 5.0, "max_forks_repo_forks_event_min_datetime": "2020-07-16T23:00:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-21T07:26:50.000Z", "avg_line_length": 32.2534246575, "max_line_length": 133, "alphanum_fraction": 0.6336801869, "num_tokens": 3038} |
import numpy as np
import tensorflow as tf
from keras.layers import Dense, Input
from keras.models import Model
from keras.layers.advanced_activations import LeakyReLU
from mpi4py import MPI
from keras.optimizers import SGD
import keras.backend as K
import time
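# Added overview (inferred from the code in this script): it benchmarks an
# autoencoder-assisted optimization ("Proposed Method" below) against plain
# differential evolution with local Adam refinement ("Comparison Method") on a
# synthetic 1000-dimensional cost whose local minima X0 are generated by
# mapping(); all candidate populations are split across MPI ranks.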
def mapping(dim,edim):
n=int(2.5*dim)
Z0=np.random.rand(n,edim)
B1=np.tile(np.random.rand(1,dim),(n,1))-0.5
B2=np.tile(np.random.rand(1,dim),(n,1))-0.5
B3=np.tile(np.random.rand(1,dim),(n,1))-0.5
W1=(np.random.rand(edim,dim)*0.05+0.05)*np.sign(np.random.rand(edim,dim)-0.5)
W2=(np.random.rand(dim,dim)*0.05+0.05)*np.sign(np.random.rand(dim,dim)-0.5)
W3=(np.random.rand(dim,dim)*0.05+0.05)*np.sign(np.random.rand(dim,dim)-0.5)
X1=np.tanh(B1+np.dot(Z0,W1))
X2=B2+np.dot(X1,W2)
X2=np.exp(-X2**2)
X3=2/(1-np.min(X2,0)[np.newaxis,:])*(X2-np.min(X2,0)[np.newaxis,:])-1
X4=B3+np.dot(X3,W3)
X5=X4-X4[0,:]
X5=np.tanh(X5)*0.325
X6=(X5**2-1)*(0.75*X5-0.25)+2/np.pi*np.arcsin(X5)
return X6
def cost_opt(X,opt_it,X0):
M=np.zeros(X.shape)
V=np.zeros(X.shape)
eta=0.001
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
for i in range(opt_it):
G=gradient(X,X0)
M=betam*M+(1-betam)*G
V=betav*V+(1-betav)*G**2
Mh=M/(1-betamh)
Vh=V/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
D=eta*Mh/(Vh**0.5 +1e-8)
X=X-D
X=np.clip(X,-1,1)
return cost(X,X0)
def cost_dec(Z,decoder,opt_it,X0):
X=decoder.predict(Z)
M=np.zeros(X.shape)
V=np.zeros(X.shape)
eta=0.001
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
for i in range(opt_it):
G=gradient(X,X0)
M=betam*M+(1-betam)*G
V=betav*V+(1-betav)*G**2
Mh=M/(1-betamh)
Vh=V/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
D=eta*Mh/(Vh**0.5 +1e-8)
X=X-D
X=np.clip(X,-1,1)
return cost(X,X0)
def cost(X,X0):
C=np.zeros(len(X))
for i in range(len(X)):
C[i]=np.min(np.sum((X0-X[[i],:])**2,1))
Cn=np.sum((X-0.25)**2,1)
Cn=(1-np.exp(-10*Cn))*(0.4+np.exp(-10*Cn))*3
return C+Cn
def gradient(X,X0):
ECn=np.exp(-10*np.sum((X-0.25)**2,1))
dCndX=2*(X-0.25)
dCndCn=3*(20*ECn**2-6*ECn)
Gn=dCndCn[:,np.newaxis]*dCndX
G0=np.zeros(X.shape)
for i in range(len(X)):
jmin=np.argmin(np.sum((X0-X[[i],:])**2,1))
G0[i,:]=2*(X[i,:]-X0[jmin,:])
G=G0+Gn
return G
def train_autoencoder(AE,X_rank,rank,size,perrank,n_epochs):
num_batches=10
batch_size_perrank=int(perrank/num_batches)
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
eta=0.001
m=None
v=None
Index=np.arange(perrank)
if rank==0:
optimizer=SGD(learning_rate=eta,momentum=0.0)
comm.Barrier()
for epoch in range(n_epochs):
np.random.shuffle(Index)
if epoch+1>0.9*n_epochs:
num_batches=1
batch_size_perrank=perrank
for batch in range(num_batches):
X_batch=np.copy(X_rank[Index[batch*batch_size_perrank:(batch+1)*batch_size_perrank],:])
if rank==0:
AE_weights=AE.get_weights()
else:
AE_weights=None
AE_weights=comm.bcast(AE_weights,root=0)
AE.set_weights(AE_weights)
with tf.GradientTape() as tape:
X_batch_pred=AE(X_batch)
loss_batch=K.mean((X_batch-X_batch_pred)**2)/size
grad=np.array(tape.gradient(loss_batch,AE.trainable_weights),dtype=object)
Gradient=[None]*len(grad)
for i in range(len(grad)):
Gradient[i]=comm.gather(grad[i],root=0)
# Gradients=comm.gather(grad,root=0)
if rank==0:
# Grad=np.sum(Gradients,0)
Grad=np.sum(Gradient,1)
if epoch==0 and batch==0:
m=(1-betam)*Grad
v=(1-betav)*Grad*Grad
else:
m=betam*m+(1-betam)*Grad
v=betav*v+(1-betav)*Grad*Grad
mh=m/(1-betamh)
vh=v/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
grad_diff=(1/(vh**0.5+1e-8)*mh).tolist()
optimizer.apply_gradients(zip(grad_diff,AE.trainable_weights))
comm.Barrier()
comm.Barrier()
if rank==0:
AE_weights=AE.get_weights()
else:
AE_weights=None
AE_weights=comm.bcast(AE_weights,root=0)
return AE_weights
comm = MPI.COMM_WORLD
rank = comm.Get_rank()
size = comm.Get_size()
dim=1000
Edim=[10,5]
if rank==0:
D=1
while D<4:
X0=mapping(1000,10)
D=np.sqrt(np.sum((np.mean(X0,0)-0.25)**2))
np.save('Results/Local_minima.npy',X0)
else:
X0=None
comm.Barrier()
X0=comm.bcast(X0,root=0)
######################################################################
## Proposed Method ##
######################################################################
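# Added outline of the four timed steps below (matches the Step 1-4 prints):
#   Step 1: refine random samples with Adam directly in the design space
#   Step 2: train an autoencoder (encoder/decoder) on those refined samples
#   Step 3: differential evolution in the decoder's latent space (cost_dec)
#   Step 4: gradient-based post-optimization of the decoded candidates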
num_samples=10000
perrank=int(num_samples/size)
number_advances=500
opt_it=10
start_time=time.time()
X_rank=np.random.rand(perrank,dim)*2-1
C_rank=np.zeros((perrank,number_advances+1))
C_rank[:,0]=cost(X_rank,X0)
M_rank=np.zeros((perrank,dim))
V_rank=np.zeros((perrank,dim))
eta=0.01
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
for iteration in range(number_advances):
G_rank=gradient(X_rank,X0)
M_rank=betam*M_rank+(1-betam)*G_rank
V_rank=betav*V_rank+(1-betav)*G_rank**2
Mh_rank=M_rank/(1-betamh)
Vh_rank=V_rank/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
D_rank=eta*Mh_rank/(Vh_rank**0.5 +1e-8)
X_rank=X_rank-D_rank
X_rank=np.clip(X_rank,-1,1)
C_rank[:,iteration+1]=cost(X_rank,X0)
if rank==0:
C_rand=np.zeros((size,perrank,number_advances+1))
X_rand=np.zeros((size,perrank,dim))
else:
C_rand=None
eval_rand=None
X_rand=None
comm.Barrier()
comm.Gather(C_rank,C_rand,root=0)
comm.Gather(X_rank,X_rand,root=0)
stop_time=time.time()
if rank==0:
C_rand=np.min(C_rand,(0,1))
eval_rand=np.linspace(0,number_advances*size*perrank,number_advances+1)
np.save('Results/X_rand.npy',X_rand.reshape((size*perrank,dim)))
print('Step 1: {:1.1f}s'.format(stop_time-start_time))
for edim in Edim:
start_time=time.time()
## Now train network
design_space=Input((dim))
enc=Dense(dim,activation='tanh')(design_space)
enc=LeakyReLU(alpha=0.3)(enc)
enc=Dense(dim,activation='tanh')(enc)
enc=LeakyReLU(alpha=0.3)(enc)
enc=Dense(dim,activation='tanh')(enc)
enc=LeakyReLU(alpha=0.3)(enc)
enc=Dense(edim,activation='sigmoid')(enc)
encoder=Model(design_space,enc)
latent_space=Input((edim))
dec=Dense(dim,activation='tanh')(latent_space)
dec=LeakyReLU(alpha=0.3)(dec)
dec=Dense(dim,activation='tanh')(dec)
dec=LeakyReLU(alpha=0.3)(dec)
dec=Dense(dim,activation='tanh')(dec)
dec=LeakyReLU(alpha=0.3)(dec)
dec=Dense(dim,activation='tanh')(dec)
decoder=Model(latent_space,dec)
AE=Model(design_space,decoder(encoder(design_space)))
AE_weights=train_autoencoder(AE,X_rank,rank,size,perrank,250)
AE.set_weights(AE_weights)
stop_time=time.time()
if rank==0:
print('Step 2: {:1.1f}s'.format(stop_time-start_time))
start_time=time.time()
# Optimize in latent space
comm.Barrier()
num_pop_perrank=int(np.ceil(10*edim/size))
num_pop=num_pop_perrank*size
prob_change=0.9
multiplyer=0.6
Z_rank=np.random.rand(num_pop_perrank,edim)
F_rank=cost_dec(Z_rank, decoder, opt_it, X0)
comm.Barrier()
if rank==0:
Z_rec=np.empty((size,num_pop_perrank,edim))
F_rec=np.empty((size,num_pop_perrank))
else:
Z_rec=None
F_rec=None
comm.Gather(Z_rank,Z_rec,root=0)
comm.Gather(F_rank,F_rec,root=0)
if rank==0:
Z=Z_rec.reshape((num_pop_perrank*size,edim))
F=F_rec.reshape(num_pop_perrank*size)
else:
Z=None
F=None
Z=comm.bcast(Z,root=0)
F=comm.bcast(F,root=0)
C_ae=np.zeros((num_pop,501))
C_ae[:,0]=F[:num_pop]
loop=0
while loop<500:
Z_rank=Z[rank*num_pop_perrank:(rank+1)*num_pop_perrank,:]
F_rank=F[rank*num_pop_perrank:(rank+1)*num_pop_perrank]
test_case=np.floor(np.random.rand(num_pop_perrank,3)*(num_pop-1e-7)).astype('int')
Za_rank=np.copy(Z[test_case[:,0],:])
Zb_rank=np.copy(Z[test_case[:,1],:])
Zc_rank=np.copy(Z[test_case[:,2],:])
Zcom_rank=Za_rank+multiplyer*(Zb_rank-Zc_rank)
prob=np.random.rand(num_pop_perrank,edim)
Zcom_rank[prob>prob_change]=np.copy(Z_rank[prob>prob_change])
Zcom_rank[Zcom_rank<0]=0
Zcom_rank[Zcom_rank>1]=1
F_compare=cost_dec(Zcom_rank, decoder, opt_it, X0)
F_rank=np.minimum(F_rank,F_compare)
Z_rank[F_compare<=F_rank,:]=Zcom_rank[F_compare<=F_rank,:]
if rank==0:
Z_rec=np.empty((size,num_pop_perrank,edim))
F_rec=np.empty((size,num_pop_perrank))
else:
Z_rec=None
F_rec=None
comm.Barrier()
comm.Gather(Z_rank,Z_rec,root=0)
comm.Gather(F_rank,F_rec,root=0)
if rank==0:
Z=Z_rec.reshape((num_pop_perrank*size,edim))
F=F_rec.reshape(num_pop_perrank*size)
Z=comm.bcast(Z,root=0)
F=comm.bcast(F,root=0)
loop=loop+1
C_ae[:,loop]=F[:num_pop]
C_min_ae=np.min(C_ae,0)
if rank==0:
eval_ae=eval_rand[-1]+np.linspace(0,num_pop*500*(opt_it+1),501)
else:
eval_ae=None
stop_time=time.time()
if rank==0:
print('Step 3: {:1.1f}s'.format(stop_time-start_time))
start_time=time.time()
#post opt
i_post=1000
X_post_rank=decoder.predict(Z_rank)
C_rank=np.zeros((num_pop_perrank,i_post+1))
C_rank[:,0]=cost(X_post_rank,X0)
M_rank=np.zeros((num_pop_perrank,dim))
V_rank=np.zeros((num_pop_perrank,dim))
eta=0.001
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
for iteration in range(i_post):
G_rank=gradient(X_post_rank,X0)
M_rank=betam*M_rank+(1-betam)*G_rank
V_rank=betav*V_rank+(1-betav)*G_rank**2
Mh_rank=M_rank/(1-betamh)
Vh_rank=V_rank/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
D_rank=eta*Mh_rank/(Vh_rank**0.5 +1e-8)
X_post_rank=X_post_rank-D_rank
X_post_rank=np.clip(X_post_rank,-1,1)
C_rank[:,iteration+1]=cost(X_post_rank,X0)
if rank==0:
C_post=np.zeros((size,num_pop_perrank,i_post+1))
else:
C_post=None
eval_post=None
data=None
comm.Barrier()
comm.Gather(C_rank,C_post,root=0)
stop_time=time.time()
if rank==0:
print('Step 4: {:1.1f}s'.format(stop_time-start_time))
C_post=np.min(C_post[:,:,opt_it:],(0,1))
eval_post=np.linspace(0,(i_post-opt_it)*num_pop,i_post+1-opt_it)+eval_ae[-1]
data=[C_rand,C_min_ae,C_post,eval_rand,eval_ae,eval_post,0]
        # dtype=object keeps NumPy from trying to build a ragged numeric array
        np.save('Results/Benchmark_data_edim={}.npy'.format(edim),np.array(data,dtype=object))
######################################################################
## Comparison Method ##
######################################################################
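# Added note (inferred from the code): the baseline runs differential evolution
# directly in the full design space, with each candidate refined by a short
# Adam run inside cost_opt, followed by the same gradient-based post-optimization.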
comm.Barrier()
start_time=time.time()
num_pop_perrank=int(np.ceil(0.5*dim/size))
num_pop=size*num_pop_perrank
prob_change=0.9
multiplyer=0.6
X_rank=np.random.rand(num_pop_perrank,dim)
C_rank=cost_opt(X_rank,opt_it, X0)
comm.Barrier()
if rank==0:
X_rec=np.empty((size,num_pop_perrank,dim))
C_rec=np.empty((size,num_pop_perrank))
else:
X_rec=None
C_rec=None
comm.Gather(X_rank,X_rec,root=0)
comm.Gather(C_rank,C_rec,root=0)
if rank==0:
X=X_rec.reshape((num_pop,dim))
C=C_rec.reshape(num_pop)
else:
X=None
C=None
X=comm.bcast(X,root=0)
C=comm.bcast(C,root=0)
C_de=np.zeros((num_pop,1001))
C_de[:,0]=C
loop=0
while loop<1000:
X_rank=X[rank*num_pop_perrank:(rank+1)*num_pop_perrank,:]
C_rank=C[rank*num_pop_perrank:(rank+1)*num_pop_perrank]
test_case=np.floor(np.random.rand(num_pop_perrank,3)*(num_pop-1e-7)).astype('int')
Xa_rank=np.copy(X[test_case[:,0],:])
Xb_rank=np.copy(X[test_case[:,1],:])
Xc_rank=np.copy(X[test_case[:,2],:])
Xcom_rank=Xa_rank+multiplyer*(Xb_rank-Xc_rank)
prob=np.random.rand(num_pop_perrank,dim)
Xcom_rank[prob>prob_change]=np.copy(X_rank[prob>prob_change])
Xcom_rank[Xcom_rank<0]=0
Xcom_rank[Xcom_rank>1]=1
C_compare=cost_opt(Xcom_rank,opt_it, X0)
C_rank=np.minimum(C_rank,C_compare)
X_rank[C_compare<=C_rank,:]=Xcom_rank[C_compare<=C_rank,:]
if rank==0:
X_rec=np.empty((size,num_pop_perrank,dim))
C_rec=np.empty((size,num_pop_perrank))
else:
X_rec=None
C_rec=None
comm.Gather(X_rank,X_rec,root=0)
comm.Gather(C_rank,C_rec,root=0)
if rank==0:
X=X_rec.reshape((num_pop,dim))
C=C_rec.reshape(num_pop)
else:
X=None
C=None
X=comm.bcast(X,root=0)
C=comm.bcast(C,root=0)
loop=loop+1
C_de[:,loop]=C
C_min_de=np.min(C_de,0)
if rank==0:
eval_de=np.linspace(0,num_pop*1000*(opt_it+1),1001)
else:
eval_de=None
stop_time=time.time()
if rank==0:
print('DE: {:1.1f}s'.format(stop_time-start_time))
start_time=time.time()
#post opt
i_post=200
X_post_rank=np.copy(X_rank)
C_rank=np.zeros((num_pop_perrank,i_post+1))
C_rank[:,0]=cost(X_post_rank,X0)
M_rank=np.zeros((num_pop_perrank,dim))
V_rank=np.zeros((num_pop_perrank,dim))
eta=0.001
betam=0.9
betav=0.999
betamh=0.9
betavh=0.999
for it_post in range(i_post):
G_rank=gradient(X_post_rank,X0)
M_rank=betam*M_rank+(1-betam)*G_rank
V_rank=betav*V_rank+(1-betav)*G_rank**2
Mh_rank=M_rank/(1-betamh)
Vh_rank=V_rank/(1-betavh)
betamh=betamh*betam
betavh=betavh*betav
D_rank=eta*Mh_rank/(Vh_rank**0.5 +1e-8)
X_post_rank=X_post_rank-D_rank
X_post_rank=np.clip(X_post_rank,-1,1)
C_rank[:,it_post+1]=cost(X_post_rank,X0)
if rank==0:
C_post=np.zeros((size,num_pop_perrank,i_post+1))
else:
C_post=None
eval_post=None
data=None
comm.Barrier()
comm.Gather(C_rank,C_post,root=0)
stop_time=time.time()
if rank==0:
print('LO: {:1.1f}s'.format(stop_time-start_time))
C_post=np.min(C_post[:,:,opt_it:],(0,1))
eval_post=np.linspace(0,(i_post-opt_it)*num_pop,i_post+1-opt_it)+eval_de[-1]
data=[C_min_de,C_post,eval_de,eval_post,0]
    # dtype=object keeps NumPy from trying to build a ragged numeric array
    np.save('Results/Benchmark_test_comp_data.npy',np.array(data,dtype=object))
| {"hexsha": "421664ba45d6daa2ebb9e91f16accd1c2b71a245", "size": 14938, "ext": "py", "lang": "Python", "max_stars_repo_path": "Benchmark_functions/C1/benchmark_test_c1.py", "max_stars_repo_name": "julianschumann/ae-opt", "max_stars_repo_head_hexsha": "611b6c893546267732a2d690df20a4cc238002e6", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Benchmark_functions/C1/benchmark_test_c1.py", "max_issues_repo_name": "julianschumann/ae-opt", "max_issues_repo_head_hexsha": "611b6c893546267732a2d690df20a4cc238002e6", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Benchmark_functions/C1/benchmark_test_c1.py", "max_forks_repo_name": "julianschumann/ae-opt", "max_forks_repo_head_hexsha": "611b6c893546267732a2d690df20a4cc238002e6", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.5801980198, "max_line_length": 100, "alphanum_fraction": 0.6056366314, "include": true, "reason": "import numpy", "num_tokens": 4863} |
% VL_LBP Local Binary Patterns
% F = VL_LBP(IM, CELLSIZE) computes the Local Binary Pattern (LBP)
%   features for image IM.
%
% IM is divided in cells of size CELLSIZE. F is a three-dimensional
%   array containing one histogram of quantized LBP features per
%   cell. The width of F is FLOOR(WIDTH/CELLSIZE), where WIDTH is the
% width of the image. The same for the height. The third dimension
% is 58.
%
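%   Example (illustrative, not part of the original help text):
%     IM = single(rand(128,128)) ;
%     F = vl_lbp(IM, 32) ;    % F is 4 x 4 x 58
%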
% See also: VL_HELP().
% AUTORIGHTS
| {"author": "Cloud-CV", "repo": "object-proposals", "sha": "597a89520bc1b0b261420d7627b8c36439a24c7a", "save_path": "github-repos/MATLAB/Cloud-CV-object-proposals", "path": "github-repos/MATLAB/Cloud-CV-object-proposals/object-proposals-597a89520bc1b0b261420d7627b8c36439a24c7a/dependencies/vlfeat-0.9.16/toolbox/misc/vl_lbp.m"} |
"""
Modeling Relational Data with Graph Convolutional Networks
Paper: https://arxiv.org/abs/1703.06103
Code: https://github.com/tkipf/relational-gcn
Difference compared to tkipf/relation-gcn
* l2norm applied to all weights
* remove nodes that won't be touched
"""
import argparse
import itertools
import numpy as np
import time
import os
os.environ['DGLBACKEND']='pytorch'
import torch as th
import torch.nn as nn
import torch.nn.functional as F
import torch.multiprocessing as mp
from torch.multiprocessing import Queue
from torch.nn.parallel import DistributedDataParallel
from torch.utils.data import DataLoader
import dgl
from dgl import DGLGraph
from dgl.distributed import DistDataLoader
from functools import partial
from dgl.nn import RelGraphConv
import tqdm
from ogb.nodeproppred import DglNodePropPredDataset
from pyinstrument import Profiler
class EntityClassify(nn.Module):
""" Entity classification class for RGCN
Parameters
----------
device : int
Device to run the layer.
num_nodes : int
Number of nodes.
h_dim : int
Hidden dim size.
out_dim : int
Output dim size.
num_rels : int
        Number of relation types.
num_bases : int
Number of bases. If is none, use number of relations.
num_hidden_layers : int
Number of hidden RelGraphConv Layer
dropout : float
Dropout
use_self_loop : bool
Use self loop if True, default False.
low_mem : bool
        True to use the low-memory implementation of the relation message
        passing function (trades speed for lower memory consumption)
"""
def __init__(self,
device,
h_dim,
out_dim,
num_rels,
num_bases=None,
num_hidden_layers=1,
dropout=0,
use_self_loop=False,
low_mem=False,
layer_norm=False):
super(EntityClassify, self).__init__()
self.device = device
self.h_dim = h_dim
self.out_dim = out_dim
self.num_rels = num_rels
self.num_bases = None if num_bases < 0 else num_bases
self.num_hidden_layers = num_hidden_layers
self.dropout = dropout
self.use_self_loop = use_self_loop
self.low_mem = low_mem
self.layer_norm = layer_norm
self.layers = nn.ModuleList()
# i2h
self.layers.append(RelGraphConv(
self.h_dim, self.h_dim, self.num_rels, "basis",
self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
low_mem=self.low_mem, dropout=self.dropout))
# h2h
for idx in range(self.num_hidden_layers):
self.layers.append(RelGraphConv(
self.h_dim, self.h_dim, self.num_rels, "basis",
self.num_bases, activation=F.relu, self_loop=self.use_self_loop,
low_mem=self.low_mem, dropout=self.dropout))
# h2o
self.layers.append(RelGraphConv(
self.h_dim, self.out_dim, self.num_rels, "basis",
self.num_bases, activation=None,
self_loop=self.use_self_loop,
low_mem=self.low_mem))
def forward(self, blocks, feats, norm=None):
if blocks is None:
# full graph training
blocks = [self.g] * len(self.layers)
h = feats
for layer, block in zip(self.layers, blocks):
block = block.to(self.device)
h = layer(block, h, block.edata[dgl.ETYPE], block.edata['norm'])
return h
def init_emb(shape, dtype):
arr = th.zeros(shape, dtype=dtype)
nn.init.uniform_(arr, -1.0, 1.0)
return arr
class DistEmbedLayer(nn.Module):
r"""Embedding layer for featureless heterograph.
Parameters
----------
dev_id : int
Device to run the layer.
g : DistGraph
training graph
embed_size : int
Output embed size
sparse_emb: bool
Whether to use sparse embedding
Default: False
dgl_sparse_emb: bool
Whether to use DGL sparse embedding
Default: False
embed_name : str, optional
Embed name
"""
def __init__(self,
dev_id,
g,
embed_size,
sparse_emb=False,
dgl_sparse_emb=False,
feat_name='feat',
embed_name='node_emb'):
super(DistEmbedLayer, self).__init__()
self.dev_id = dev_id
self.embed_size = embed_size
self.embed_name = embed_name
self.feat_name = feat_name
self.sparse_emb = sparse_emb
self.g = g
self.ntype_id_map = {g.get_ntype_id(ntype):ntype for ntype in g.ntypes}
self.node_projs = nn.ModuleDict()
for ntype in g.ntypes:
if feat_name in g.nodes[ntype].data:
self.node_projs[ntype] = nn.Linear(g.nodes[ntype].data[feat_name].shape[1], embed_size)
nn.init.xavier_uniform_(self.node_projs[ntype].weight)
print('node {} has data {}'.format(ntype, feat_name))
if sparse_emb:
if dgl_sparse_emb:
self.node_embeds = {}
for ntype in g.ntypes:
# We only create embeddings for nodes without node features.
if feat_name not in g.nodes[ntype].data:
part_policy = g.get_node_partition_policy(ntype)
self.node_embeds[ntype] = dgl.distributed.DistEmbedding(g.number_of_nodes(ntype),
self.embed_size,
embed_name + '_' + ntype,
init_emb,
part_policy)
else:
self.node_embeds = nn.ModuleDict()
for ntype in g.ntypes:
# We only create embeddings for nodes without node features.
if feat_name not in g.nodes[ntype].data:
self.node_embeds[ntype] = th.nn.Embedding(g.number_of_nodes(ntype), self.embed_size, sparse=self.sparse_emb)
nn.init.uniform_(self.node_embeds[ntype].weight, -1.0, 1.0)
else:
self.node_embeds = nn.ModuleDict()
for ntype in g.ntypes:
# We only create embeddings for nodes without node features.
if feat_name not in g.nodes[ntype].data:
self.node_embeds[ntype] = th.nn.Embedding(g.number_of_nodes(ntype), self.embed_size)
nn.init.uniform_(self.node_embeds[ntype].weight, -1.0, 1.0)
def forward(self, node_ids, ntype_ids):
"""Forward computation
Parameters
----------
node_ids : Tensor
node ids to generate embedding for.
ntype_ids : Tensor
node type ids
Returns
-------
tensor
embeddings as the input of the next layer
"""
embeds = th.empty(node_ids.shape[0], self.embed_size, device=self.dev_id)
for ntype_id in th.unique(ntype_ids).tolist():
ntype = self.ntype_id_map[int(ntype_id)]
loc = ntype_ids == ntype_id
if self.feat_name in self.g.nodes[ntype].data:
embeds[loc] = self.node_projs[ntype](self.g.nodes[ntype].data[self.feat_name][node_ids[ntype_ids == ntype_id]].to(self.dev_id))
else:
embeds[loc] = self.node_embeds[ntype](node_ids[ntype_ids == ntype_id]).to(self.dev_id)
return embeds
def compute_acc(results, labels):
"""
Compute the accuracy of prediction given the labels.
"""
labels = labels.long()
return (results == labels).float().sum() / len(results)
def gen_norm(g):
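    # Compute a per-edge normalization factor equal to 1 / in-degree of the
    # destination node and store it in g.edata['norm']; EntityClassify.forward
    # passes it to each RelGraphConv layer.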
_, v, eid = g.all_edges(form='all')
_, inverse_index, count = th.unique(v, return_inverse=True, return_counts=True)
degrees = count[inverse_index]
norm = th.ones(eid.shape[0], device=eid.device) / degrees
norm = norm.unsqueeze(1)
g.edata['norm'] = norm
def evaluate(g, model, embed_layer, labels, eval_loader, test_loader, all_val_nid, all_test_nid):
model.eval()
embed_layer.eval()
eval_logits = []
eval_seeds = []
global_results = dgl.distributed.DistTensor(labels.shape, th.long, 'results', persistent=True)
with th.no_grad():
for sample_data in tqdm.tqdm(eval_loader):
seeds, blocks = sample_data
for block in blocks:
gen_norm(block)
feats = embed_layer(blocks[0].srcdata[dgl.NID], blocks[0].srcdata[dgl.NTYPE])
logits = model(blocks, feats)
eval_logits.append(logits.cpu().detach())
assert np.all(seeds.numpy() < g.number_of_nodes('paper'))
eval_seeds.append(seeds.cpu().detach())
eval_logits = th.cat(eval_logits)
eval_seeds = th.cat(eval_seeds)
global_results[eval_seeds] = eval_logits.argmax(dim=1)
test_logits = []
test_seeds = []
with th.no_grad():
for sample_data in tqdm.tqdm(test_loader):
seeds, blocks = sample_data
for block in blocks:
gen_norm(block)
feats = embed_layer(blocks[0].srcdata[dgl.NID], blocks[0].srcdata[dgl.NTYPE])
logits = model(blocks, feats)
test_logits.append(logits.cpu().detach())
assert np.all(seeds.numpy() < g.number_of_nodes('paper'))
test_seeds.append(seeds.cpu().detach())
test_logits = th.cat(test_logits)
test_seeds = th.cat(test_seeds)
global_results[test_seeds] = test_logits.argmax(dim=1)
g.barrier()
if g.rank() == 0:
return compute_acc(global_results[all_val_nid], labels[all_val_nid]), \
compute_acc(global_results[all_test_nid], labels[all_test_nid])
else:
return -1, -1
class NeighborSampler:
"""Neighbor sampler
Parameters
----------
g : DGLHeterograph
Full graph
    fanouts : list of int
        Fanout of each hop starting from the seed nodes. If a fanout is None,
        sample full neighbors.
    sample_neighbors : callable
        Neighbor sampling function to use (e.g. dgl.distributed.sample_neighbors).
"""
def __init__(self, g, fanouts, sample_neighbors):
self.g = g
self.fanouts = fanouts
self.sample_neighbors = sample_neighbors
def sample_blocks(self, seeds):
"""Do neighbor sample
Parameters
----------
seeds :
Seed nodes
Returns
-------
tensor
Seed nodes, also known as target nodes
blocks
Sampled subgraphs
"""
blocks = []
etypes = []
norms = []
ntypes = []
seeds = th.LongTensor(np.asarray(seeds))
gpb = self.g.get_partition_book()
# We need to map the per-type node IDs to homogeneous IDs.
cur = gpb.map_to_homo_nid(seeds, 'paper')
for fanout in self.fanouts:
# For a heterogeneous input graph, the returned frontier is stored in
# the homogeneous graph format.
frontier = self.sample_neighbors(self.g, cur, fanout, replace=False)
block = dgl.to_block(frontier, cur)
cur = block.srcdata[dgl.NID]
block.edata[dgl.EID] = frontier.edata[dgl.EID]
# Map the homogeneous edge Ids to their edge type.
block.edata[dgl.ETYPE], block.edata[dgl.EID] = gpb.map_to_per_etype(block.edata[dgl.EID])
# Map the homogeneous node Ids to their node types and per-type Ids.
block.srcdata[dgl.NTYPE], block.srcdata[dgl.NID] = gpb.map_to_per_ntype(block.srcdata[dgl.NID])
block.dstdata[dgl.NTYPE], block.dstdata[dgl.NID] = gpb.map_to_per_ntype(block.dstdata[dgl.NID])
blocks.insert(0, block)
return seeds, blocks
def run(args, device, data):
g, num_classes, train_nid, val_nid, test_nid, labels, all_val_nid, all_test_nid = data
num_rels = len(g.etypes)
fanouts = [int(fanout) for fanout in args.fanout.split(',')]
val_fanouts = [int(fanout) for fanout in args.validation_fanout.split(',')]
sampler = NeighborSampler(g, fanouts, dgl.distributed.sample_neighbors)
# Create DataLoader for constructing blocks
dataloader = DistDataLoader(
dataset=train_nid,
batch_size=args.batch_size,
collate_fn=sampler.sample_blocks,
shuffle=True,
drop_last=False)
valid_sampler = NeighborSampler(g, val_fanouts, dgl.distributed.sample_neighbors)
# Create DataLoader for constructing blocks
valid_dataloader = DistDataLoader(
dataset=val_nid,
batch_size=args.batch_size,
collate_fn=valid_sampler.sample_blocks,
shuffle=False,
drop_last=False)
test_sampler = NeighborSampler(g, [-1] * args.n_layers, dgl.distributed.sample_neighbors)
# Create DataLoader for constructing blocks
test_dataloader = DistDataLoader(
dataset=test_nid,
batch_size=args.batch_size,
collate_fn=test_sampler.sample_blocks,
shuffle=False,
drop_last=False)
embed_layer = DistEmbedLayer(device,
g,
args.n_hidden,
sparse_emb=args.sparse_embedding,
dgl_sparse_emb=args.dgl_sparse,
feat_name='feat')
model = EntityClassify(device,
args.n_hidden,
num_classes,
num_rels,
num_bases=args.n_bases,
num_hidden_layers=args.n_layers-2,
dropout=args.dropout,
use_self_loop=args.use_self_loop,
low_mem=args.low_mem,
layer_norm=args.layer_norm)
model = model.to(device)
if not args.standalone:
model = th.nn.parallel.DistributedDataParallel(model)
# If there are dense parameters in the embedding layer
        # or we use PyTorch sparse embeddings.
if len(embed_layer.node_projs) > 0 or not args.dgl_sparse:
embed_layer = DistributedDataParallel(embed_layer, device_ids=None, output_device=None)
if args.sparse_embedding:
if args.dgl_sparse and args.standalone:
emb_optimizer = dgl.distributed.SparseAdagrad(list(embed_layer.node_embeds.values()), lr=args.sparse_lr)
print('optimize DGL sparse embedding:', embed_layer.node_embeds.keys())
elif args.dgl_sparse:
emb_optimizer = dgl.distributed.SparseAdagrad(list(embed_layer.module.node_embeds.values()), lr=args.sparse_lr)
print('optimize DGL sparse embedding:', embed_layer.module.node_embeds.keys())
elif args.standalone:
emb_optimizer = th.optim.SparseAdam(list(embed_layer.node_embeds.parameters()), lr=args.sparse_lr)
print('optimize Pytorch sparse embedding:', embed_layer.node_embeds)
else:
emb_optimizer = th.optim.SparseAdam(list(embed_layer.module.node_embeds.parameters()), lr=args.sparse_lr)
print('optimize Pytorch sparse embedding:', embed_layer.module.node_embeds)
dense_params = list(model.parameters())
if args.node_feats:
if args.standalone:
dense_params += list(embed_layer.node_projs.parameters())
print('optimize dense projection:', embed_layer.node_projs)
else:
dense_params += list(embed_layer.module.node_projs.parameters())
print('optimize dense projection:', embed_layer.module.node_projs)
optimizer = th.optim.Adam(dense_params, lr=args.lr, weight_decay=args.l2norm)
else:
all_params = list(model.parameters()) + list(embed_layer.parameters())
optimizer = th.optim.Adam(all_params, lr=args.lr, weight_decay=args.l2norm)
# training loop
print("start training...")
for epoch in range(args.n_epochs):
tic = time.time()
sample_time = 0
copy_time = 0
forward_time = 0
backward_time = 0
update_time = 0
number_train = 0
step_time = []
iter_t = []
sample_t = []
feat_copy_t = []
forward_t = []
backward_t = []
update_t = []
iter_tput = []
start = time.time()
# Loop over the dataloader to sample the computation dependency graph as a list of
# blocks.
step_time = []
for step, sample_data in enumerate(dataloader):
seeds, blocks = sample_data
number_train += seeds.shape[0]
tic_step = time.time()
sample_time += tic_step - start
sample_t.append(tic_step - start)
for block in blocks:
gen_norm(block)
feats = embed_layer(blocks[0].srcdata[dgl.NID], blocks[0].srcdata[dgl.NTYPE])
label = labels[seeds]
copy_time = time.time()
feat_copy_t.append(copy_time - tic_step)
# forward
logits = model(blocks, feats)
loss = F.cross_entropy(logits, label)
forward_end = time.time()
# backward
optimizer.zero_grad()
if args.sparse_embedding and not args.dgl_sparse:
emb_optimizer.zero_grad()
loss.backward()
optimizer.step()
if args.sparse_embedding:
emb_optimizer.step()
compute_end = time.time()
forward_t.append(forward_end - copy_time)
backward_t.append(compute_end - forward_end)
# Aggregate gradients in multiple nodes.
update_t.append(time.time() - compute_end)
step_t = time.time() - start
step_time.append(step_t)
train_acc = th.sum(logits.argmax(dim=1) == label).item() / len(seeds)
if step % args.log_every == 0:
print('[{}] Epoch {:05d} | Step {:05d} | Train acc {:.4f} | Loss {:.4f} | time {:.3f} s' \
'| sample {:.3f} | copy {:.3f} | forward {:.3f} | backward {:.3f} | update {:.3f}'.format(
g.rank(), epoch, step, train_acc, loss.item(), np.sum(step_time[-args.log_every:]),
np.sum(sample_t[-args.log_every:]), np.sum(feat_copy_t[-args.log_every:]), np.sum(forward_t[-args.log_every:]),
np.sum(backward_t[-args.log_every:]), np.sum(update_t[-args.log_every:])))
start = time.time()
print('[{}]Epoch Time(s): {:.4f}, sample: {:.4f}, data copy: {:.4f}, forward: {:.4f}, backward: {:.4f}, update: {:.4f}, #number_train: {}'.format(
g.rank(), np.sum(step_time), np.sum(sample_t), np.sum(feat_copy_t), np.sum(forward_t), np.sum(backward_t), np.sum(update_t), number_train))
epoch += 1
start = time.time()
g.barrier()
val_acc, test_acc = evaluate(g, model, embed_layer, labels,
valid_dataloader, test_dataloader, all_val_nid, all_test_nid)
if val_acc >= 0:
print('Val Acc {:.4f}, Test Acc {:.4f}, time: {:.4f}'.format(val_acc, test_acc,
time.time() - start))
def main(args):
dgl.distributed.initialize(args.ip_config, args.num_servers, num_workers=args.num_workers)
if not args.standalone:
th.distributed.init_process_group(backend='gloo')
g = dgl.distributed.DistGraph(args.graph_name, part_config=args.conf_path)
print('rank:', g.rank())
pb = g.get_partition_book()
train_nid = dgl.distributed.node_split(g.nodes['paper'].data['train_mask'], pb, ntype='paper', force_even=True)
val_nid = dgl.distributed.node_split(g.nodes['paper'].data['val_mask'], pb, ntype='paper', force_even=True)
test_nid = dgl.distributed.node_split(g.nodes['paper'].data['test_mask'], pb, ntype='paper', force_even=True)
local_nid = pb.partid2nids(pb.partid, 'paper').detach().numpy()
print('part {}, train: {} (local: {}), val: {} (local: {}), test: {} (local: {})'.format(
g.rank(), len(train_nid), len(np.intersect1d(train_nid.numpy(), local_nid)),
len(val_nid), len(np.intersect1d(val_nid.numpy(), local_nid)),
len(test_nid), len(np.intersect1d(test_nid.numpy(), local_nid))))
device = th.device('cpu')
labels = g.nodes['paper'].data['labels'][np.arange(g.number_of_nodes('paper'))]
all_val_nid = th.LongTensor(np.nonzero(g.nodes['paper'].data['val_mask'][np.arange(g.number_of_nodes('paper'))])).squeeze()
all_test_nid = th.LongTensor(np.nonzero(g.nodes['paper'].data['test_mask'][np.arange(g.number_of_nodes('paper'))])).squeeze()
n_classes = len(th.unique(labels[labels >= 0]))
print('#classes:', n_classes)
run(args, device, (g, n_classes, train_nid, val_nid, test_nid, labels, all_val_nid, all_test_nid))
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='RGCN')
# distributed training related
parser.add_argument('--graph-name', type=str, help='graph name')
parser.add_argument('--id', type=int, help='the partition id')
parser.add_argument('--ip-config', type=str, help='The file for IP configuration')
parser.add_argument('--conf-path', type=str, help='The path to the partition config file')
parser.add_argument('--num-client', type=int, help='The number of clients')
parser.add_argument('--num-servers', type=int, default=1, help='Server count on each machine.')
# rgcn related
parser.add_argument("--gpu", type=str, default='0',
help="gpu")
parser.add_argument("--dropout", type=float, default=0,
help="dropout probability")
parser.add_argument("--n-hidden", type=int, default=16,
help="number of hidden units")
parser.add_argument("--lr", type=float, default=1e-2,
help="learning rate")
parser.add_argument("--sparse-lr", type=float, default=1e-2,
help="sparse lr rate")
parser.add_argument("--n-bases", type=int, default=-1,
help="number of filter weight matrices, default: -1 [use all]")
parser.add_argument("--n-layers", type=int, default=2,
help="number of propagation rounds")
parser.add_argument("-e", "--n-epochs", type=int, default=50,
help="number of training epochs")
parser.add_argument("-d", "--dataset", type=str, required=True,
help="dataset to use")
parser.add_argument("--l2norm", type=float, default=0,
help="l2 norm coef")
parser.add_argument("--relabel", default=False, action='store_true',
help="remove untouched nodes and relabel")
parser.add_argument("--fanout", type=str, default="4, 4",
help="Fan-out of neighbor sampling.")
parser.add_argument("--validation-fanout", type=str, default=None,
help="Fan-out of neighbor sampling during validation.")
parser.add_argument("--use-self-loop", default=False, action='store_true',
help="include self feature as a special relation")
parser.add_argument("--batch-size", type=int, default=100,
help="Mini-batch size. ")
parser.add_argument("--eval-batch-size", type=int, default=128,
help="Mini-batch size. ")
parser.add_argument('--log-every', type=int, default=20)
parser.add_argument("--num-workers", type=int, default=1,
help="Number of workers for distributed dataloader.")
parser.add_argument("--low-mem", default=False, action='store_true',
help="Whether use low mem RelGraphCov")
parser.add_argument("--mix-cpu-gpu", default=False, action='store_true',
help="Whether store node embeddins in cpu")
parser.add_argument("--sparse-embedding", action='store_true',
help='Use sparse embedding for node embeddings.')
parser.add_argument("--dgl-sparse", action='store_true',
help='Whether to use DGL sparse embedding')
    parser.add_argument('--node-feats', default=False, action='store_true',
            help='Whether to use node features')
parser.add_argument('--layer-norm', default=False, action='store_true',
help='Use layer norm')
parser.add_argument('--local_rank', type=int, help='get rank of the process')
parser.add_argument('--standalone', action='store_true', help='run in the standalone mode')
args = parser.parse_args()
# if validation_fanout is None, set it with args.fanout
if args.validation_fanout is None:
args.validation_fanout = args.fanout
print(args)
main(args)
| {"hexsha": "d4d91fd858fd54adaa3a4b1c5dab60fd478fe19b", "size": 24691, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/pytorch/rgcn/experimental/entity_classify_dist.py", "max_stars_repo_name": "Padarn/dgl", "max_stars_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-12T08:14:04.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-12T08:14:04.000Z", "max_issues_repo_path": "examples/pytorch/rgcn/experimental/entity_classify_dist.py", "max_issues_repo_name": "Padarn/dgl", "max_issues_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/pytorch/rgcn/experimental/entity_classify_dist.py", "max_forks_repo_name": "Padarn/dgl", "max_forks_repo_head_hexsha": "5087a21279be98021fddfd1ba61487be4adfede8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2022-02-08T11:27:24.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-08T11:27:24.000Z", "avg_line_length": 42.3516295026, "max_line_length": 154, "alphanum_fraction": 0.610829857, "include": true, "reason": "import numpy", "num_tokens": 5649} |
from django.shortcuts import render
from django.http import HttpResponse, Http404
import os
from os import path
import numpy as np
import pandas as pd
from PIL import Image
from wordcloud import WordCloud, STOPWORDS, ImageColorGenerator
import matplotlib.pyplot as plt
from newspaper import Article
import nltk
nltk.download('punkt')
import re, pprint
from nltk import word_tokenize
from hatesonar import Sonar
from django.views.static import serve
def checkKey(d, key):
    """Return True if key is present in the dictionary d, False otherwise."""
    return key in d
def home(request):
return render(request, 'consciousApp/home.html')
def ocr(request):
return render(request, 'consciousApp/ocr.html')
def texttobrf(request):
print(request.POST)
#data=request.POST.get('text_data')
data=dict(request.POST)
text_data=data['text_data']
text_file = open('./consciousApp/static/consciousApp/input/data.txt', 'w+')
text_file.write(str(text_data[0]))
text_file.close()
os.system("./consciousApp/static/consciousApp/file2brl/file2brl ./consciousApp/static/consciousApp/input/data.txt ./consciousApp/static/consciousApp/output/data.brf")
return render(request,'consciousApp/braille.html')
def braille(request):
val = 'I am reading Braille'
print(request.POST)
key = 'download'
one=checkKey(dict(request.POST), key);
if one == True:
filepath = './consciousApp/static/consciousApp/output/data.brf'
return serve(request, os.path.basename(filepath), os.path.dirname(filepath))
if request.method=='POST':
val = request.POST['some_text']
return render(request,'consciousApp/braille.html', {'val': val})
def triggers(request):
if request.method=='POST':
print(request.POST)
data=dict(request.POST)
# Driver Code
key = 'show_details'
one=checkKey(data, key);
key = 'check_triggers'
two=checkKey(data, key)
key = 'show_wordcloud'
three=checkKey(data, key)
key = 'hate_speech'
four=checkKey(data, key)
print(one,two,three)
#URL Link case
if(one==True):
url=data['Link'][0]
print(url)
article = Article(url)
article.download()
article.parse()
authors=article.authors
publishdate=article.publish_date
#article.text
article.nlp()
keywords=article.keywords
articlesummary=article.summary
return render(request, 'consciousApp/triggers.html', {'authors':authors , 'publishdate': publishdate,'keywords':keywords,'articlesummary':articlesummary})
#Show triggers
elif(two==True):
text = request.POST['input_text'].lower()
triggers = ["9 11", "9-11", "9/11", "ableism", "abusive", "ageism", "alcoholism", "animal abuse", "animal death", "animal violence", "bestiality", "gore", "corpse", "bully", "cannibal", "car accident", "child abuse", "childbirth", "classism", "death", "decapitation", "abuse", "drug", "heroin", "cocaine", "eating disorder", "anorexia", "binge eating", "bulimia", "fatphobia", "forced captivity", "holocaust", "hitler", "homophobia", "hostage", "incest", "kidnap", "murder", "nazi", "overdose", "pedophilia", "prostitution", "PTSD", "racism", "racist", "rape", "raping", "scarification", "self-harm", "self harm", "cutting", "sexism", "slavery", "slurs", "suicide", "suicidal", "swearing", "terminal illness", "terrorism", "torture", "transphobia", "violence", "warfare"]
tw = []
text_file = open('./consciousApp/static/consciousApp/input/triggercheckdata.txt', 'w+')
text_file.write(str(text))
text_file.close()
for trigger in triggers:
if text.find(trigger) > -1: tw.append(trigger)
if tw == []: tw.append('No Triggers Found')
return render(request, 'consciousApp/triggers.html', {'text': text, 'triggers': tw,'data':data})
#Show_cloud
elif(three==True):
text = request.POST['input_text'].lower()
tokens = word_tokenize(text)
textdata = nltk.Text(tokens)
stopwords = set(STOPWORDS)
wordcloud = WordCloud(stopwords=stopwords, max_font_size=50, max_words=100, background_color="white").generate(text)
wordcloud.to_file("./consciousApp/static/consciousApp/output/word-cloud.png")
data="./../../static/consciousApp/output/word-cloud.png"
return render(request, 'consciousApp/triggers.html', {'data': data} )
elif(four==True):
sonar = Sonar();
text = request.POST['input_text'].lower();
url=data['Link'][0];
data=sonar.ping(text=text)["classes"];
hate_speech=data[0];
hate_speech_confidence=hate_speech["confidence"]*100;
offensive_language=data[1];
offensive_language_confidence=offensive_language["confidence"]*100;
neither=data[2];
neither_confidence=neither["confidence"]*100;
print(type(data))
print(offensive_language_confidence*100,hate_speech_confidence*100,neither_confidence*100)
return render(request, 'consciousApp/triggers.html',{'hate_speech_confidence':hate_speech_confidence,'offensive_language_confidence':offensive_language_confidence,'neither_confidence':neither_confidence})
else:
return render(request, 'consciousApp/triggers.html')
def dyslexicsol(request):
val = 'Hello! Convert your text into dyslexic readable form.'
if request.method == 'POST':
val = request.POST['some_text']
return render(request,'consciousApp/open-dyslexic.html', {'val':val})
| {"hexsha": "b2eee6ce91ad749da3f23bd12f42ce53bcb15b02", "size": 6178, "ext": "py", "lang": "Python", "max_stars_repo_path": "consciousApp/views.py", "max_stars_repo_name": "ashwani-rathee/Conscious", "max_stars_repo_head_hexsha": "8eda41f21bf5e574e657d325f9c86e20635e812a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-01-18T09:43:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-07T09:22:09.000Z", "max_issues_repo_path": "consciousApp/views.py", "max_issues_repo_name": "ashwani-rathee/Conscious", "max_issues_repo_head_hexsha": "8eda41f21bf5e574e657d325f9c86e20635e812a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "consciousApp/views.py", "max_forks_repo_name": "ashwani-rathee/Conscious", "max_forks_repo_head_hexsha": "8eda41f21bf5e574e657d325f9c86e20635e812a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-01-19T07:09:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-18T15:33:36.000Z", "avg_line_length": 46.4511278195, "max_line_length": 787, "alphanum_fraction": 0.6133052768, "include": true, "reason": "import numpy", "num_tokens": 1421} |
""" Ichimoku Indicator
"""
import math
import numpy
import pandas
from talib import abstract
from analyzers.utils import IndicatorUtils
class Ichimoku(IndicatorUtils):
def analyze(self, historical_data, signal=['leading_span_a', 'leading_span_b'], hot_thresh=None, cold_thresh=None):
"""Performs an ichimoku cloud analysis on the historical data
Args:
            historical_data (list): A matrix of historical OHLCV data.
signal (list, optional): Defaults to leading_span_a and leading_span_b. The indicator
line to check hot/cold against.
hot_thresh (float, optional): Defaults to None. The threshold at which this might be
good to purchase.
cold_thresh (float, optional): Defaults to None. The threshold at which this might be
good to sell.
Returns:
pandas.DataFrame: A dataframe containing the indicators and hot/cold values.
"""
tenkansen_period = 9
kijunsen_period = 26
leading_span_b_period = 52
dataframe = self.convert_to_dataframe(historical_data)
ichimoku_columns = {
'tenkansen': [numpy.nan] * dataframe.index.shape[0],
'kijunsen': [numpy.nan] * dataframe.index.shape[0],
'leading_span_a': [numpy.nan] * dataframe.index.shape[0],
'leading_span_b': [numpy.nan] * dataframe.index.shape[0]
}
ichimoku_values = pandas.DataFrame(
ichimoku_columns,
index=dataframe.index
)
ichimoku_df_size = ichimoku_values.shape[0]
for index in range(tenkansen_period, ichimoku_df_size):
start_index = index - tenkansen_period
last_index = index + 1
tankansen_min = dataframe['low'][start_index:last_index].min()
tankansen_max = dataframe['high'][start_index:last_index].max()
ichimoku_values['tenkansen'][index] = (tankansen_min + tankansen_max) / 2
for index in range(kijunsen_period, ichimoku_df_size):
start_index = index - kijunsen_period
last_index = index + 1
kijunsen_min = dataframe['low'][start_index:last_index].min()
kijunsen_max = dataframe['high'][start_index:last_index].max()
ichimoku_values['kijunsen'][index] = (kijunsen_min + kijunsen_max) / 2
for index in range(leading_span_b_period, ichimoku_df_size):
start_index = index - leading_span_b_period
last_index = index + 1
leading_span_b_min = dataframe['low'][start_index:last_index].min()
leading_span_b_max = dataframe['high'][start_index:last_index].max()
ichimoku_values['leading_span_b'][index] = (
leading_span_b_min + leading_span_b_max
) / 2
ichimoku_values['leading_span_a'] = (
ichimoku_values['tenkansen'] + ichimoku_values['kijunsen']
) / 2
ichimoku_values.dropna(how='any', inplace=True)
ichimoku_df_size = ichimoku_values.shape[0]
ichimoku_values['is_hot'] = False
ichimoku_values['is_cold'] = False
for index in range(0, ichimoku_df_size):
span_hot = ichimoku_values['leading_span_a'][index] > ichimoku_values['leading_span_b'][index]
close_hot = dataframe['close'][index] > ichimoku_values['leading_span_a'][index]
if hot_thresh:
ichimoku_values.at[ichimoku_values.index[index], 'is_hot'] = span_hot and close_hot
span_cold = ichimoku_values['leading_span_a'][index] < ichimoku_values['leading_span_b'][index]
close_cold = dataframe['close'][index] < ichimoku_values['leading_span_a'][index]
if cold_thresh:
ichimoku_values.at[ichimoku_values.index[index], 'is_cold'] = span_cold and close_cold
return ichimoku_values
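# Example usage (illustrative sketch, not part of the original module):
# `historical_data` is assumed to be OHLCV rows in whatever format
# IndicatorUtils.convert_to_dataframe() expects (e.g. fetched from an exchange API).
#
#   ichimoku = Ichimoku()
#   values = ichimoku.analyze(historical_data, hot_thresh=1, cold_thresh=1)
#   print(values[['leading_span_a', 'leading_span_b', 'is_hot', 'is_cold']].tail())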
| {"hexsha": "4f8cbfab1bff7d3a3ff4e2a079d5b014530f2be7", "size": 3903, "ext": "py", "lang": "Python", "max_stars_repo_path": "app/analyzers/indicators/ichimoku.py", "max_stars_repo_name": "ferreiramarcelo/telegram-crypto-signals", "max_stars_repo_head_hexsha": "321d9305de5b98cc1c70bec293cafd0ce7432db3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1857, "max_stars_repo_stars_event_min_datetime": "2020-09-03T16:15:27.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T23:08:43.000Z", "max_issues_repo_path": "app/analyzers/indicators/ichimoku.py", "max_issues_repo_name": "ExBotTrader/Crypto-Signal", "max_issues_repo_head_hexsha": "7c6871baa5023e0e4cc14f4cd5ae10ac5592698d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 154, "max_issues_repo_issues_event_min_datetime": "2018-02-26T12:37:57.000Z", "max_issues_repo_issues_event_max_datetime": "2020-08-26T13:06:08.000Z", "max_forks_repo_path": "app/analyzers/indicators/ichimoku.py", "max_forks_repo_name": "ExBotTrader/Crypto-Signal", "max_forks_repo_head_hexsha": "7c6871baa5023e0e4cc14f4cd5ae10ac5592698d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 535, "max_forks_repo_forks_event_min_datetime": "2020-09-04T22:49:14.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-27T16:52:07.000Z", "avg_line_length": 41.0842105263, "max_line_length": 119, "alphanum_fraction": 0.6443761209, "include": true, "reason": "import numpy", "num_tokens": 931} |
using SymbolicCodegen
using Test
@testset "SymbolicCodegen.jl" begin
# Write your tests here.
end
| {"hexsha": "195f970efb88ccf62bc5d8e37b140e2aabaf5c08", "size": 103, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "cscherrer/SymbolicCodegen.jl", "max_stars_repo_head_hexsha": "97815f9d900da37aee54a87b6a1f5baf9a528212", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-01-10T14:33:11.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-08T12:36:03.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "cscherrer/SymbolicCodegen.jl", "max_issues_repo_head_hexsha": "97815f9d900da37aee54a87b6a1f5baf9a528212", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-01-14T03:07:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-03T20:37:06.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "cscherrer/SymbolicCodegen.jl", "max_forks_repo_head_hexsha": "97815f9d900da37aee54a87b6a1f5baf9a528212", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.7142857143, "max_line_length": 35, "alphanum_fraction": 0.7669902913, "num_tokens": 30} |
# coding: utf-8
# # Assignment 2
#
# Before working on this assignment please read these instructions fully. In the submission area, you will notice that you can click the link to **Preview the Grading** for each step of the assignment. This is the criteria that will be used for peer grading. Please familiarize yourself with the criteria before beginning the assignment.
#
# An NOAA dataset has been stored in the file `data/C2A2_data/BinnedCsvs_d100/4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110.csv`. The data for this assignment comes from a subset of The National Centers for Environmental Information (NCEI) [Daily Global Historical Climatology Network](https://www1.ncdc.noaa.gov/pub/data/ghcn/daily/readme.txt) (GHCN-Daily). The GHCN-Daily is comprised of daily climate records from thousands of land surface stations across the globe.
#
# Each row in the assignment datafile corresponds to a single observation.
#
# The following variables are provided to you:
#
# * **id** : station identification code
# * **date** : date in YYYY-MM-DD format (e.g. 2012-01-24 = January 24, 2012)
# * **element** : indicator of element type
# * TMAX : Maximum temperature (tenths of degrees C)
# * TMIN : Minimum temperature (tenths of degrees C)
# * **value** : data value for element (tenths of degrees C)
#
# For this assignment, you must:
#
# 1. Read the documentation and familiarize yourself with the dataset, then write some python code which returns a line graph of the record high and record low temperatures by day of the year over the period 2005-2014. The area between the record high and record low temperatures for each day should be shaded.
# 2. Overlay a scatter of the 2015 data for any points (highs and lows) for which the ten-year (2005-2014) record high or record low was broken in 2015.
# 3. Watch out for leap days (i.e. February 29th); it is reasonable to remove these points from the dataset for the purpose of this visualization.
# 4. Make the visual nice! Leverage principles from the first module in this course when developing your solution. Consider issues such as legends, labels, and chart junk.
#
# The data you have been given is near **None, None, Singapore**, and the stations the data comes from are shown on the map below.
# In[1]:
import matplotlib.pyplot as plt
import mplleaflet
import pandas as pd
def leaflet_plot_stations(binsize, hashid):
df = pd.read_csv('data/C2A2_data/BinSize_d{}.csv'.format(binsize))
station_locations_by_hash = df[df['hash'] == hashid]
lons = station_locations_by_hash['LONGITUDE'].tolist()
lats = station_locations_by_hash['LATITUDE'].tolist()
plt.figure(figsize=(8,8))
plt.scatter(lons, lats, c='r', alpha=0.7, s=200)
return mplleaflet.display()
leaflet_plot_stations(100,'4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110')
# In[2]:
df = pd.read_csv('data/C2A2_data/BinnedCsvs_d100/4e86d2106d0566c6ad9843d882e72791333b08be3d647dcae4f4b110.csv')
# In[3]:
df.sort_values(['ID','Date']).head()
# In[4]:
df['Year'], df['Month-Date'] = zip(*df['Date'].apply(lambda x: (x[:4], x[5:])))
df = df[df['Month-Date'] != '02-29']
# In[5]:
import numpy as np
temp_min = df[(df['Element'] == 'TMIN') & (df['Year'] != '2015')].groupby('Month-Date').aggregate({'Data_Value':np.min})
temp_max = df[(df['Element'] == 'TMAX') & (df['Year'] != '2015')].groupby('Month-Date').aggregate({'Data_Value':np.max})
# In[6]:
temp_min.head()
# In[7]:
temp_min_15 = df[(df['Element'] == 'TMIN') & (df['Year'] == '2015')].groupby('Month-Date').aggregate({'Data_Value':np.min})
temp_max_15 = df[(df['Element'] == 'TMAX') & (df['Year'] == '2015')].groupby('Month-Date').aggregate({'Data_Value':np.max})
# In[8]:
broken_min = np.where(temp_min_15['Data_Value'] < temp_min['Data_Value'])[0]
broken_max = np.where(temp_max_15['Data_Value'] > temp_max['Data_Value'])[0]
# In[9]:
broken_max, broken_min
# In[10]:
temp_min_15.head()
# In[11]:
plt.figure()
plt.plot(temp_min.values, 'b', label = 'record low')
plt.plot(temp_max.values, 'r', label = 'record high')
plt.scatter(broken_min, temp_min_15.iloc[broken_min], s = 10, c = 'g', label = 'broken low')
plt.scatter(broken_max, temp_max_15.iloc[broken_max], s = 10, c = 'm', label = 'broken high')
plt.gca().axis([-5, 370, -150, 650])
plt.xticks(range(0, len(temp_min), 20), temp_min.index[range(0, len(temp_min), 20)], rotation = '45')
plt.xlabel('Day of the Year')
plt.ylabel('Temperature (Tenths of Degrees C)')
plt.title('Temperature Summary Plot near Singapore')
plt.legend(loc = 4, frameon = False)
plt.gca().fill_between(range(len(temp_min)), temp_min['Data_Value'], temp_max['Data_Value'], facecolor = 'yellow', alpha = 0.5)
plt.gca().spines['top'].set_visible(False)
plt.gca().spines['right'].set_visible(False)
plt.show()
| {"hexsha": "e7cbcae9caaefdd61d0dd2c1b7746491c06661ee", "size": 4815, "ext": "py", "lang": "Python", "max_stars_repo_path": "reference/coursera-Applied-Data-Science-with-Python/Applied-Plotting-Charting-And-Data-Representation-in-Python/week2/Assignment2.py", "max_stars_repo_name": "shijiansu/coursera-applied-data-science-with-python", "max_stars_repo_head_hexsha": "a0f2bbd0b9201805f26d18b73a25183cf0b3a0e9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "reference/coursera-Applied-Data-Science-with-Python/Applied-Plotting-Charting-And-Data-Representation-in-Python/week2/Assignment2.py", "max_issues_repo_name": "shijiansu/coursera-applied-data-science-with-python", "max_issues_repo_head_hexsha": "a0f2bbd0b9201805f26d18b73a25183cf0b3a0e9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "reference/coursera-Applied-Data-Science-with-Python/Applied-Plotting-Charting-And-Data-Representation-in-Python/week2/Assignment2.py", "max_forks_repo_name": "shijiansu/coursera-applied-data-science-with-python", "max_forks_repo_head_hexsha": "a0f2bbd0b9201805f26d18b73a25183cf0b3a0e9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.4672131148, "max_line_length": 480, "alphanum_fraction": 0.7165109034, "include": true, "reason": "import numpy", "num_tokens": 1393} |
"""
This file contains the logic responsible for training a model on a processed dataset. It prints the model's accuracy.
Based on Paul van Gent's code from blog post: http://www.paulvangent.com/2016/04/01/emotion-recognition-with-python-opencv-and-a-face-dataset/
"""
import glob
import random
import numpy as np
import cv2
from image_commons import load_image
fishface = cv2.face.FisherFaceRecognizer_create()
training_set_size = 0.95
def get_files(emotion):
"""
    gets paths to all images of a given emotion and splits them into two sets: training and test
:param emotion: name of emotion to find images for
"""
files = glob.glob("data/sorted_set/%s/*" % emotion)
random.shuffle(files)
training = files[:int(len(files) * training_set_size)]
prediction = files[-int(len(files) * (1 - training_set_size)):]
return training, prediction
def make_sets():
"""
    method used to create datasets for all emotions. It loads both images and their labels into memory, split into training and test sets
"""
training_data = []
training_labels = []
prediction_data = []
prediction_labels = []
for emotion in emotions:
training, prediction = get_files(emotion)
for item in training:
training_data.append(load_image(item))
training_labels.append(emotions.index(emotion))
for item in prediction:
prediction_data.append(load_image(item))
prediction_labels.append(emotions.index(emotion))
return training_data, training_labels, prediction_data, prediction_labels
def run_recognizer():
"""
    method creates datasets using the make_sets method, then trains a model and tests it with the test set. It returns the percentage of correct guesses on the test data
"""
training_data, training_labels, prediction_data, prediction_labels = make_sets()
print("size of training set is:", len(training_labels), "images")
fishface.train(training_data, np.asarray(training_labels))
print("predicting classification set")
correct = sum(1 for id, image in enumerate(prediction_data) if fishface.predict(image)[0] == prediction_labels[id])
return ((100 * correct) / len(prediction_data))
if __name__ == '__main__':
emotions = ["neutral", "anger", "disgust", "happy", "sadness", "surprise"]
for i in range(0, 1):
correct = run_recognizer()
print("got", correct, "percent correct!")
fishface.save('models/emotion_detection_model.xml')
| {"hexsha": "b00bb9fcad7808d1230ec6724803b0e91e078089", "size": 2482, "ext": "py", "lang": "Python", "max_stars_repo_path": "prepare_model.py", "max_stars_repo_name": "Navaneethg98/facemoji", "max_stars_repo_head_hexsha": "7ee5dc5f9aa332847111f02c493c94eb2efdcdd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 130, "max_stars_repo_stars_event_min_datetime": "2017-10-09T13:56:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T19:58:07.000Z", "max_issues_repo_path": "prepare_model.py", "max_issues_repo_name": "Navaneethg98/facemoji", "max_issues_repo_head_hexsha": "7ee5dc5f9aa332847111f02c493c94eb2efdcdd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2018-10-30T09:51:46.000Z", "max_issues_repo_issues_event_max_datetime": "2019-07-16T08:45:06.000Z", "max_forks_repo_path": "prepare_model.py", "max_forks_repo_name": "Navaneethg98/facemoji", "max_forks_repo_head_hexsha": "7ee5dc5f9aa332847111f02c493c94eb2efdcdd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 47, "max_forks_repo_forks_event_min_datetime": "2017-09-22T03:27:35.000Z", "max_forks_repo_forks_event_max_datetime": "2021-02-01T16:57:55.000Z", "avg_line_length": 33.5405405405, "max_line_length": 155, "alphanum_fraction": 0.7054794521, "include": true, "reason": "import numpy", "num_tokens": 549} |
program t
integer,dimension(3)::i=(/1,2,3/)
where (i>1) i=i*2
print *,i
end program t
| {"hexsha": "8954cf41fbdd84fb503988e5820d2c69276148b4", "size": 116, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "tests/t0116x/t.f", "max_stars_repo_name": "maddenp/ppp", "max_stars_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-13T16:32:02.000Z", "max_stars_repo_stars_event_max_datetime": "2021-06-21T12:37:58.000Z", "max_issues_repo_path": "tests/t0116x/t.f", "max_issues_repo_name": "maddenp/ppp", "max_issues_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/t0116x/t.f", "max_forks_repo_name": "maddenp/ppp", "max_forks_repo_head_hexsha": "81956c0fc66ff742531817ac9028c4df940cc13e", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2015-07-30T17:02:27.000Z", "max_forks_repo_forks_event_max_datetime": "2015-08-03T16:29:41.000Z", "avg_line_length": 19.3333333333, "max_line_length": 39, "alphanum_fraction": 0.4827586207, "num_tokens": 42} |
# Scenario E - Peakshape Variation (pVoigt model - results evaluation)
This file is used to evaluate the inference results.
In this scenario the peak shape in a spectrum with a fixed number of peaks is varied from Gaussian (η = 0.0) to Lorentzian (η = 1.0). All datasets contain 3 peaks and the noise level is kept constant at 1%.
The model used in the inference of the parameters is formulated as follows:
\begin{equation}
\large y = f(x) = \sum\limits_{m=1}^M \big[A_m \cdot f_{pseudo-Voigt}(x)\big] + \epsilon
\end{equation}
where:
\begin{equation}
\large f_{pseudo-Voigt}(x) = \eta \cdot \frac{\sigma_m^2}{(x-\mu_m)^2 + \sigma_m^2} + (1 - \eta) \cdot e^{-\frac{(x-\mu_m)^2}{2\cdot\sigma_m^2}}
\end{equation}
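For reference, here is a minimal NumPy sketch of the pseudo-Voigt peak defined above (the function name, grid, and parameter values are illustrative and not taken from the inference code):

```python
import numpy as np

def pseudo_voigt(x, mu, sigma, eta):
    # eta-weighted mix of a Lorentzian and a Gaussian with common center and width
    lorentzian = sigma**2 / ((x - mu)**2 + sigma**2)
    gaussian = np.exp(-(x - mu)**2 / (2 * sigma**2))
    return eta * lorentzian + (1 - eta) * gaussian

# Synthetic 3-peak spectrum mirroring the model structure above (values are made up)
x = np.linspace(0, 10, 500)
amplitudes = [1.0, 0.6, 0.8]      # A_m
centers    = [2.5, 5.0, 7.5]      # mu_m
widths     = [0.3, 0.4, 0.25]     # sigma_m
eta        = 0.5                  # peak shape factor
y = sum(A * pseudo_voigt(x, mu, s, eta) for A, mu, s in zip(amplitudes, centers, widths))
y = y + np.random.normal(0.0, 0.01, size=x.shape)   # noise term epsilon
```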
```python
%matplotlib inline
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import pymc3 as pm
import arviz as az
#az.style.use('arviz-darkgrid')
print('Running on PyMC3 v{}'.format(pm.__version__))
```
WARNING (theano.tensor.blas): Using NumPy C-API based implementation for BLAS functions.
Running on PyMC3 v3.8
## Load results summary
```python
# load results from disk
#fname = './scenario_peakshape_pvoigt.csv'
fname = './scenario_peakshape_pvoigt_lrun.csv'
df = pd.read_csv(fname)
df.index += 1
df
```
<div>
<table border="1" class="dataframe">
<thead>
<tr style="text-align: right;">
<th></th>
<th>r_hat</th>
<th>mcse</th>
<th>ess</th>
<th>bfmi</th>
<th>r2</th>
<th>waic</th>
<th>epsilon</th>
<th>epsilon_real</th>
<th>eta</th>
<th>eta_real</th>
<th>cat</th>
</tr>
</thead>
<tbody>
<tr>
<th>1</th>
<td>1.780909</td>
<td>0.226636</td>
<td>104.363636</td>
<td>1.249257</td>
<td>0.999856</td>
<td>-4675.470979</td>
<td>0.049972</td>
<td>0.05</td>
<td>0.005661</td>
<td>0.0</td>
<td>1</td>
</tr>
<tr>
<th>2</th>
<td>1.000000</td>
<td>0.000000</td>
<td>4383.363636</td>
<td>1.029887</td>
<td>0.999035</td>
<td>-2331.742859</td>
<td>0.109758</td>
<td>0.05</td>
<td>0.000332</td>
<td>0.0</td>
<td>1</td>
</tr>
<tr>
<th>3</th>
<td>1.070909</td>
<td>0.049455</td>
<td>684.363636</td>
<td>1.014762</td>
<td>0.999820</td>
<td>-4207.406117</td>
<td>0.059134</td>
<td>0.05</td>
<td>0.000384</td>
<td>0.0</td>
<td>1</td>
</tr>
<tr>
<th>4</th>
<td>1.005455</td>
<td>0.003818</td>
<td>1720.727273</td>
<td>1.037991</td>
<td>0.999818</td>
<td>-4715.487126</td>
<td>0.050034</td>
<td>0.05</td>
<td>0.008271</td>
<td>0.0</td>
<td>1</td>
</tr>
<tr>
<th>5</th>
<td>1.000000</td>
<td>0.000727</td>
<td>2124.454545</td>
<td>1.044191</td>
<td>0.999872</td>
<td>-4726.544298</td>
<td>0.049924</td>
<td>0.05</td>
<td>0.001852</td>
<td>0.0</td>
<td>1</td>
</tr>
<tr>
<th>...</th>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<th>196</th>
<td>1.000000</td>
<td>0.000182</td>
<td>2297.909091</td>
<td>1.000964</td>
<td>0.999846</td>
<td>-4766.885947</td>
<td>0.049235</td>
<td>0.05</td>
<td>0.998066</td>
<td>1.0</td>
<td>5</td>
</tr>
<tr>
<th>197</th>
<td>1.450909</td>
<td>0.557818</td>
<td>1605.181818</td>
<td>1.008004</td>
<td>0.999950</td>
<td>-4836.525261</td>
<td>0.048113</td>
<td>0.05</td>
<td>0.999041</td>
<td>1.0</td>
<td>5</td>
</tr>
<tr>
<th>198</th>
<td>1.000000</td>
<td>0.000000</td>
<td>4905.090909</td>
<td>1.054635</td>
<td>0.999424</td>
<td>-2786.356407</td>
<td>0.094399</td>
<td>0.05</td>
<td>0.977116</td>
<td>1.0</td>
<td>5</td>
</tr>
<tr>
<th>199</th>
<td>1.000000</td>
<td>0.000000</td>
<td>3922.636364</td>
<td>0.997197</td>
<td>0.999916</td>
<td>-4743.426499</td>
<td>0.049649</td>
<td>0.05</td>
<td>0.999503</td>
<td>1.0</td>
<td>5</td>
</tr>
<tr>
<th>200</th>
<td>1.000000</td>
<td>0.000000</td>
<td>3774.636364</td>
<td>1.053122</td>
<td>0.999882</td>
<td>-4733.435338</td>
<td>0.049827</td>
<td>0.05</td>
<td>0.999430</td>
<td>1.0</td>
<td>5</td>
</tr>
</tbody>
</table>
<p>200 rows × 11 columns</p>
</div>
```python
# number of succesfull convergences
suc = df.loc[(df['r_hat'] <= 1.1) & (df['r2'] >= 0.99)]
len(suc)
```
189
```python
import seaborn as sns
sns.set(style="ticks", rc={'figure.figsize':(8,6)}, font_scale=1.35)
# color palette
pal = sns.cubehelix_palette(8, rot=-.5, dark=.3)
ax = sns.violinplot(x='eta_real', y='epsilon', data=suc, palette=pal, linewidth=2) \
.set_title("Peakshape eta (real) vs. Noise level (inferred)")
plt.savefig('violinplot_noise.png', dpi=150)
```
```python
#ax = sns.boxplot(x='eta_real', y='epsilon', data=suc, palette=pal, linewidth=2, fliersize=2) \
# .set_title("Peakshape eta (real) vs. Noise level (inferred)")
ax = sns.boxplot(x='eta_real', y='epsilon', data=suc, palette=sns.color_palette("Purples"),
linewidth=1, fliersize=2.5)
plt.savefig('boxplot_noise.png', dpi=150)
```
```python
ax = sns.violinplot(x='eta_real', y='eta', data=suc, palette=pal, linewidth=2) \
.set_title("Peakshape eta (real) vs. Peakshape eta (inferred)")
plt.savefig('violinplot_peakshape.png', dpi=150)
```
```python
#ax = sns.boxplot(x='eta_real', y='eta', data=suc, palette=pal, linewidth=2, fliersize=2) \
# .set_title("Peakshape eta (real) vs. Peakshape eta (inferred)")
#ax = sns.boxplot(x='eta_real', y='eta', data=suc, palette=pal, linewidth=2, fliersize=2)
ax = sns.boxplot(x='eta_real', y='eta', data=suc, palette=sns.color_palette("Purples"),
linewidth=1, fliersize=2.5)
plt.savefig('boxplot_peakshape.png', dpi=150)
```
```python
# table values
suc_000 = suc.loc[(suc['eta_real'] == 0.0)]
print("N = {0}".format(len(suc_000)))
suc_000['eta'].quantile([.25, .5, .75])
```
N = 35
0.25 0.000404
0.50 0.000938
0.75 0.001384
Name: eta, dtype: float64
```python
suc_025 = suc.loc[(suc['eta_real'] == 0.25)]
print("N = {0}".format(len(suc_025)))
suc_025['eta'].quantile([.25, .5, .75])
```
N = 39
0.25 0.227337
0.50 0.246641
0.75 0.250805
Name: eta, dtype: float64
```python
suc_050 = suc.loc[(suc['eta_real'] == 0.5)]
print("N = {0}".format(len(suc_050)))
suc_050['eta'].quantile([.25, .5, .75])
```
N = 39
0.25 0.489433
0.50 0.499222
0.75 0.500169
Name: eta, dtype: float64
```python
suc_075 = suc.loc[(suc['eta_real'] == 0.75)]
print("N = {0}".format(len(suc_075)))
suc_075['eta'].quantile([.25, .5, .75])
```
N = 38
0.25 0.733126
0.50 0.748641
0.75 0.750410
Name: eta, dtype: float64
```python
suc_100 = suc.loc[(suc['eta_real'] == 1.0)]
print("N = {0}".format(len(suc_100)))
suc_100['eta'].quantile([.25, .5, .75])
```
N = 38
0.25 0.990277
0.50 0.997331
0.75 0.998399
Name: eta, dtype: float64
```python
ax = sns.scatterplot(x="eta", y="epsilon", data=suc, hue="eta_real", s=100,
palette="jet", legend="full")
plt.savefig('scatterplot_noise_peakshape.png', dpi=150)
```
## Statistics per peak shape factor
```python
for i in [1,2,3,4,5]:
cat = df.loc[(df['cat'] == i)]
print('cat: {0} r-hat: {1:.2f}'.format(i, cat['r_hat'].mean()))
print('cat: {0} r2 : {1:.4f}'.format(i, cat['r2'].mean()))
print('cat: {0} waic : {1:.2f}'.format(i, cat['waic'].mean()))
print('cat: {0} mcse : {1:.4f}'.format(i, cat['mcse'].mean()))
print('cat: {0} ess : {1:.1f}'.format(i, cat['ess'].mean()))
print('cat: {0} bfmi : {1:.4f}'.format(i, cat['bfmi'].mean()))
print('cat: {0} epsi : {1:.4f}'.format(i, cat['epsilon'].mean()))
print('cat: {0} eta : {1:.4f}'.format(i, cat['eta'].mean()))
print('\n')
```
cat: 1 r-hat: 1.08
cat: 1 r2 : 0.9997
cat: 1 waic : -4342.65
cat: 1 mcse : 0.0823
cat: 1 ess : 3298.4
cat: 1 bfmi : 1.0416
cat: 1 epsi : 0.0585
cat: 1 eta : 0.0014
cat: 2 r-hat: 1.01
cat: 2 r2 : 0.9995
cat: 2 waic : -3972.92
cat: 2 mcse : 0.0267
cat: 2 ess : 3879.6
cat: 2 bfmi : 1.0516
cat: 2 epsi : 0.0762
cat: 2 eta : 0.2313
cat: 3 r-hat: 1.01
cat: 3 r2 : 0.9995
cat: 3 waic : -4069.55
cat: 3 mcse : 0.0199
cat: 3 ess : 3676.4
cat: 3 bfmi : 1.0493
cat: 3 epsi : 0.0713
cat: 3 eta : 0.4816
cat: 4 r-hat: 1.00
cat: 4 r2 : 0.9988
cat: 4 waic : -3637.68
cat: 4 mcse : 0.0039
cat: 4 ess : 3647.7
cat: 4 bfmi : 1.0568
cat: 4 epsi : 0.0971
cat: 4 eta : 0.7067
cat: 5 r-hat: 1.02
cat: 5 r2 : 0.9996
cat: 5 waic : -4145.78
cat: 5 mcse : 0.0217
cat: 5 ess : 3287.5
cat: 5 bfmi : 1.0257
cat: 5 epsi : 0.0649
cat: 5 eta : 0.9747
```python
```
| {"hexsha": "272adc201c79cece126e4d97ad88200211080207", "size": 148485, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "code/scenarios/scenario_e/scenario_peakshape_pvoigt_evaluation.ipynb", "max_stars_repo_name": "jnispen/PPSDA", "max_stars_repo_head_hexsha": "910261551dd08768a72ab0a3e81bd73c706a143a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-07T02:22:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-07T02:22:25.000Z", "max_issues_repo_path": "code/scenarios/scenario_e/scenario_peakshape_pvoigt_evaluation.ipynb", "max_issues_repo_name": "jnispen/PPSDA", "max_issues_repo_head_hexsha": "910261551dd08768a72ab0a3e81bd73c706a143a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/scenarios/scenario_e/scenario_peakshape_pvoigt_evaluation.ipynb", "max_forks_repo_name": "jnispen/PPSDA", "max_forks_repo_head_hexsha": "910261551dd08768a72ab0a3e81bd73c706a143a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 203.1258549932, "max_line_length": 38820, "alphanum_fraction": 0.8900023571, "converted": true, "num_tokens": 3969} |
[STATEMENT]
lemma poly_ring_one_mono:
assumes "n \<le> m"
shows "\<one>\<^bsub>R[\<X>\<^bsub>n\<^esub>]\<^esub> = \<one>\<^bsub>coord_ring R m\<^esub>"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<one>\<^bsub>R [\<X>\<^bsub>n\<^esub>]\<^esub> = \<one>\<^bsub>R [\<X>\<^bsub>m\<^esub>]\<^esub>
[PROOF STEP]
by (metis R.Pring_one coord_ring_def) | {"llama_tokens": 167, "file": "Padic_Field_Ring_Powers", "length": 1} |
import matplotlib.pyplot as plt
import networkx as nx
import numpy as np
import pandas as pd
import altair as alt
from pylab import rcParams
from .common import *
alt.data_transformers.disable_max_rows()
from networkx.drawing.nx_agraph import graphviz_layout, to_agraph
import pygraphviz as pgv
from IPython.display import Image
def draw(A):
return Image(A.draw(format='png', prog='dot'))
def D_as_graph(D,file=None):
G = nx.DiGraph()
for i in D.index:
for j in D.columns:
if D.loc[i,j] != 0:
G.add_edge(i,j,width=D.loc[i,j],label=D.loc[i,j])
A = to_agraph(G)
A.layout('dot')
if file is not None:
A.draw(file)
return draw(A)
# Given something like:
# A = [4, 10, 1, 12, 3, 9, 0, 6, 5, 11, 2, 8, 7]
# B = [5, 4, 10, 1, 7, 6, 12, 3, 9, 0, 11, 2, 8]
def AB_to_P2(A,B):
P2 = pd.DataFrame(np.array([A,B]))
return P2
def spider3(perm1,perm2,file=None,fig_format="PNG",width=5,height=10,font_size=8,xmult = 2,ymult=1.2):
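    """
    Draw a two-column comparison ("spider" plot) of two rankings.

    perm1 and perm2 must be pandas Series of equal length with different names;
    equal values in the two Series are connected by an edge, and node labels are
    taken from the Series index. If `file` is given, the figure is saved there.
    """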
assert len(perm1) == len(perm2)
assert type(perm1) == pd.Series
assert type(perm2) == pd.Series
assert perm1.name != perm2.name
rcParams['figure.figsize'] = width, height
#rcParams['figure.constrained_layout.h_pad'] = 5
#plt.tight_layout()
plt.clf()
G = nx.Graph()
pos = {}
buffer = 0.25
step = (2-2*buffer)/len(perm1)
labels={}
y1 = []
y2 = []
y = []
index = []
for i in range(len(perm1)):
name1 = f"{perm1.name}:{perm1.iloc[i]}"
name2 = f"{perm2.name}:{perm2.iloc[i]}"
G.add_node(name1)
G.add_node(name2)
loc = 1-buffer-(i*step)
pos[name1] = np.array([-1,loc])
pos[name2] = np.array([1,loc])
labels[name1] = perm1.index[i]
labels[name2] = perm2.index[i]
y1.append(name1)
y2.append(name2)
y.append("A")
y.append("B")
index.append(name1)
index.append(name2)
y=pd.Series(y,index=index)
for i in range(len(perm1)):
name1 = f"{perm1.name}:{perm1.iloc[i]}"
ix = np.where(perm1.iloc[i] == perm2)[0][0]
name2 = f"{perm2.name}:{perm2.iloc[ix]}"
G.add_edge(name1, name2)
edges = G.edges()
nx.draw_networkx_labels(G,pos=pos,labels=labels,font_size=font_size)
color_map = y.map({"A":"white","B":"white"})
nx.draw(G, pos, node_color=color_map)
xmax= xmult*max(xx for xx,yy in pos.values())
ymax= ymult*max(yy for xx,yy in pos.values())
plt.xlim(-xmax,xmax)
plt.ylim(-ymax,ymax)
#A = to_agraph(G)
#A.layout('dot')
#nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
if file is not None:
plt.savefig(file)
def spider2(perm1,perm2,file=None,fig_format="PNG",width=5,height=10,font_size=8,xmult = 2,ymult=1.2):
assert len(perm1) == len(perm2)
assert type(perm1) == pd.Series
assert type(perm2) == pd.Series
assert perm1.name != perm2.name
rcParams['figure.figsize'] = width, height
#rcParams['figure.constrained_layout.h_pad'] = 5
#plt.tight_layout()
plt.clf()
G = nx.Graph()
pos = {}
buffer = 0.25
step = (2-2*buffer)/len(perm1)
labels={}
y1 = []
y2 = []
y = []
index = []
for i in range(len(perm1)):
name1 = f"{perm1.name}:{perm1.loc[i]}"
name2 = f"{perm2.name}:{perm2.loc[i]}"
G.add_node(name1)
G.add_node(name2)
loc = 1-buffer-(i*step)
pos[name1] = np.array([-1,loc])
pos[name2] = np.array([1,loc])
labels[name1] = perm1.loc[i]
labels[name2] = perm2.loc[i]
y1.append(name1)
y2.append(name2)
y.append("A")
y.append("B")
index.append(name1)
index.append(name2)
y=pd.Series(y,index=index)
for i in range(len(perm1)):
name1 = f"{perm1.name}:{perm1.loc[i]}"
ix = np.where(perm1.loc[i] == perm2)[0][0]
name2 = f"{perm2.name}:{perm2.loc[ix]}"
G.add_edge(name1, name2)
edges = G.edges()
nx.draw_networkx_labels(G,pos=pos,labels=labels,font_size=font_size)
color_map = y.map({"A":"white","B":"white"})
nx.draw(G, pos, node_color=color_map)
xmax= xmult*max(xx for xx,yy in pos.values())
ymax= ymult*max(yy for xx,yy in pos.values())
plt.xlim(-xmax,xmax)
plt.ylim(-ymax,ymax)
#A = to_agraph(G)
#A.layout('dot')
#nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
if file is not None:
plt.savefig(file)
def spider(P2,file=None,fig_format="PNG",width=5,height=10,font_size=8):
"""
from pyrankability.plot import spider, AB_to_P2
A = [4, 10, 1, 12, 3, 9, 0, 6, 5, 11, 2, 8, 7]
B = [5, 4, 10, 1, 7, 6, 12, 3, 9, 0, 11, 2, 8]
spider(AB_to_P2(A,B))
"""
rcParams['figure.figsize'] = width, height
G = nx.Graph()
pos = {}
buffer = 0.25
step = (2-2*buffer)/P2.shape[1]
labels={}
y1 = []
y2 = []
y = []
index = []
for i in range(P2.shape[1]):
v = str(i+1)
name1 = f"A{v}:{P2.iloc[0,i]}"
name2 = f"B{v}:{P2.iloc[1,i]}"
#name2 = "B%d:%d"%(i+1,P2.iloc[1,i])
G.add_node(name1)
G.add_node(name2)
loc = 1-buffer-(i*step)
pos[name1] = np.array([-1,loc])
pos[name2] = np.array([1,loc])
labels[name1] = P2.iloc[0,i]
labels[name2] = P2.iloc[1,i]
y1.append(name1)
y2.append(name2)
y.append("A")
y.append("B")
index.append(name1)
index.append(name2)
y=pd.Series(y,index=index)
for i in range(P2.shape[1]):
v=str(i+1)
name1 = f"A{v}:{P2.iloc[0,i]}"
#name1 = "A%d:%d"%(i+1,P2.iloc[0,i])
ix = np.where(P2.iloc[1,:] == P2.iloc[0,i])[0][0]
v=str(ix+1)
name2 = f"B{v}:{P2.iloc[0,i]}"
#name2 = "B%d:%d"%(ix+1,P2.iloc[0,i])
G.add_edge(name1, name2)
edges = G.edges()
nx.draw_networkx_labels(G,pos=pos,labels=labels,font_size=font_size)
color_map = y.map({"A":"white","B":"white"})
nx.draw(G, pos, node_color=color_map)
#A = to_agraph(G)
#A.layout('dot')
#nx.draw_networkx_edge_labels(G, pos, edge_labels=edge_labels)
if file is not None:
#A.draw(file)
plt.savefig(file)
def show_score_xstar(xstars,indices=None,group_label="Group",fixed_r=None,resolve_scale=False,columns=1,width=300,height=300):
all_df = pd.DataFrame(columns=["i","j","x",group_label,"ri","rj"])
score_df = pd.DataFrame(columns=["num_frac_xstar_upper","num_one_xstar_upper","num_zero_xstar_upper"])
score_df.index.name = group_label
ordered_xstars = {}
for key in xstars.keys():
x = xstars[key].copy()
if fixed_r is not None and key in fixed_r:
r = fixed_r[key]
else:
r = x.sum(axis=0)
order = np.argsort(r)
xstar = x.copy().iloc[order,:].iloc[:,order]
xstar.loc[:,:] = threshold_x(xstar.values)
if indices is not None:
x = x.iloc[indices[key],:].iloc[:,indices[key]]
ordered_xstars[key] = xstar
inxs = np.triu_indices(len(xstar),k=1)
xstar_upper = xstar.values[inxs[0],inxs[1]]
nfrac_upper = sum((xstar_upper > 0) & (xstar_upper < 1))
none_upper = sum(xstar_upper == 1)
nzero_upper = sum(xstar_upper == 0)
score_df = score_df.append(pd.Series([nfrac_upper,none_upper,nzero_upper],index=score_df.columns,name=key))
#rixs = np.argsort(r)
#x = x.iloc[:,rixs].iloc[rixs,:]#np.ix_(rixs,rixs)]
df = (1-x).stack().reset_index()
df.columns=["i","j","x"]
df["ri"] = list(r.loc[df["i"]])
df["rj"] = list(r.loc[df["j"]])
df[group_label] = key
all_df = all_df.append(df)
#all_df = all_df.loc[(all_df.x != 0) & (all_df.x != 1)]
g = alt.Chart(all_df,width=width).mark_square().encode(
x=alt.X(
'i:N',
axis=alt.Axis(labelOverlap=False),
title="r",
sort=alt.EncodingSortField(field="ri",order="ascending") # The order to sort in
),
y=alt.Y(
'j:N',
axis=alt.Axis(labelOverlap=False),
title="r",
sort=alt.EncodingSortField(field="rj",order="ascending") # The order to sort in
),
color=alt.Color("x",scale=alt.Scale(scheme='greys'))
).properties(
width=width,
height=height
).facet(
facet=alt.Column("%s:N"%group_label, title=None),
columns=columns
)
if resolve_scale:
g = g.resolve_scale(x='independent',y='independent')
g.configure_title(
fontSize=12,
font='Times',
orient='bottom'
)
return g,score_df,ordered_xstars
def show_single_xstar(x,indices=None,fixed_r=None,
width=300,height=300,
labelFontSize=10,titleFontSize=10,prepare_url_func=None):
    if fixed_r is not None:
        # In this single-matrix variant, fixed_r is the ranking vector itself.
        r = fixed_r
    else:
        r = x.sum(axis=0)
    order = np.argsort(r)
    xstar = x.copy().iloc[order,:].iloc[:,order]
    xstar.loc[:,:] = threshold_x(xstar.values)
    if indices is not None:
        x = x.iloc[indices,:].iloc[:,indices]
# For coloring purposes
x.loc[:,:] = threshold_x(x.values)
ordered_xstar = xstar
inxs = np.triu_indices(len(xstar),k=1)
xstar_upper = xstar.values[inxs]
nfrac_upper = sum((xstar_upper > 0) & (xstar_upper < 1))
none_upper = sum(xstar_upper == 1)
nzero_upper = sum(xstar_upper == 0)
score_series = pd.Series([nfrac_upper,none_upper,nzero_upper],
index=["num_frac_xstar_upper","num_one_xstar_upper","num_zero_xstar_upper"])
df = x.stack().reset_index()
df.columns=["i","j","x"]
df["ri"] = list(r.loc[df["i"]])
df["rj"] = list(r.loc[df["j"]])
df.loc[:,"c"] = "white"
df.loc[(df["x"] > 0) & (df["x"] < 1) & (df["ri"] < df["rj"]),"c"] = "green"
df.loc[(df["x"] > 0) & (df["x"] < 1) & (df["ri"] > df["rj"]),"c"] = "red"
df.loc[df["i"] == df["j"],"c"] = "black"
if prepare_url_func is not None:
df_url = prepare_url_func(df)
else:
df_url = df
g = alt.Chart(df_url,width=width).mark_square().encode(
x=alt.X(
'i:N',
axis=alt.Axis(labelOverlap=False,labelFontSize=8),
title="r",
sort=alt.EncodingSortField(field="ri",order="ascending") # The order to sort in
),
y=alt.Y(
'j:N',
axis=alt.Axis(labelOverlap=False,labelFontSize=8),
title="r",
sort=alt.EncodingSortField(field="rj",order="ascending") # The order to sort in
),
color=alt.Color("c:N",scale=None)#alt.Scale(scheme='greys'))
).properties(
width=width,
height=height
).configure_axis(
labelFontSize=labelFontSize,
titleFontSize=titleFontSize
)
return g,score_series,ordered_xstar
def show_score_xstar2(xstars,indices=None,group_label="Group",fixed_r=None,resolve_scale=False,columns=1,width=300,height=300,labelFontSize=12):
all_df = pd.DataFrame(columns=["i","j","x",group_label,"ri","rj"])
score_df = pd.DataFrame(columns=["num_frac_xstar_upper","num_one_xstar_upper","num_zero_xstar_upper"])
score_df.index.name = group_label
ordered_xstars = {}
for key in xstars.keys():
x = xstars[key].copy()
if fixed_r is not None and key in fixed_r:
r = fixed_r[key]
else:
r = x.sum(axis=0)
order = np.argsort(r)
xstar = x.copy().iloc[order,:].iloc[:,order]
xstar.loc[:,:] = threshold_x(xstar.values)
if indices is not None:
x = x.iloc[indices[key],:].iloc[:,indices[key]]
# For coloring purposes
x.loc[:,:] = threshold_x(x.values)
ordered_xstars[key] = xstar
inxs = np.triu_indices(len(xstar),k=1)
xstar_upper = xstar.values[inxs]
#import pdb; pdb.set_trace()
nfrac_upper = sum((xstar_upper > 0) & (xstar_upper < 1))
none_upper = sum(xstar_upper == 1)
nzero_upper = sum(xstar_upper == 0)
score_df = score_df.append(pd.Series([nfrac_upper,none_upper,nzero_upper],index=score_df.columns,name=key))
#rixs = np.argsort(r)
#x = x.iloc[:,rixs].iloc[rixs,:]#np.ix_(rixs,rixs)]
df = x.stack().reset_index()
df.columns=["i","j","x"]
df["ri"] = list(r.loc[df["i"]])
df["rj"] = list(r.loc[df["j"]])
df.loc[:,"c"] = "white"
df.loc[(df["x"] > 0) & (df["x"] < 1) & (df["ri"] < df["rj"]),"c"] = "green"
df.loc[(df["x"] > 0) & (df["x"] < 1) & (df["ri"] > df["rj"]),"c"] = "red"
df.loc[df["i"] == df["j"],"c"] = "black"
df[group_label] = key
all_df = all_df.append(df)
#all_df = all_df.loc[(all_df.x != 0) & (all_df.x != 1)]
g = alt.Chart(all_df,width=width).mark_square().encode(
x=alt.X(
'i:N',
axis=alt.Axis(labelOverlap=False,labelFontSize=8),
title="r",
sort=alt.EncodingSortField(field="ri",order="ascending") # The order to sort in
),
y=alt.Y(
'j:N',
axis=alt.Axis(labelOverlap=False,labelFontSize=8),
title="r",
sort=alt.EncodingSortField(field="rj",order="ascending") # The order to sort in
),
color=alt.Color("c",scale=None)#alt.Scale(scheme='greys'))
).properties(
width=width,
height=height
).facet(
facet=alt.Column(title=None,field=alt.Field(group_label),type='nominal',header=alt.Header(labelFontSize=labelFontSize,labelOrient='bottom')),
#alt.Column("%s:N"%group_label, title=,header=alt.Header(labelBaseline="bottom")),
columns=columns
).configure_axis(
labelFontSize=10,
titleFontSize=10
)
#g= g.configure_title(
# fontSize=12,
# font='Times',
# titleAnchor='bottom'
#)
if resolve_scale:
g = g.resolve_scale(x='independent',y='independent')
return g,score_df,ordered_xstars
def show_hillside(V,P0):
perm=pd.Series(P0,index=V.columns)
r=perm.argsort()
#V_G=V.iloc[perm,:].iloc[:,perm]
#x = pd.DataFrame(details['x'],index=V.index,columns=V.columns).iloc[perm,:].iloc[:,perm]
#r = x.sum(axis=1)
df=V.T.stack().to_frame().reset_index()
df.columns=["team_i_name","team_k_name","v"]
df["ri"] = list(-r.loc[df["team_i_name"]])
df["rk"] = list(r.loc[df["team_k_name"]])
g=alt.Chart(df).mark_circle().encode(
x=alt.X(
'team_i_name:N',
axis=alt.Axis(labelOverlap=False),
title="r",
sort=alt.SortField(field="ri",order="descending") # The order to sort in
),
y=alt.Y(
'team_k_name:N',
axis=alt.Axis(labelOverlap=False),
title="r",
sort=alt.SortField(field="rk",order="ascending") # The order to sort in
),
size='v:Q'
)
return g
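# Minimal sketch of the spider plot on two hypothetical rankings; spider3
# expects two pd.Series containing the same items as values and having
# distinct .name attributes (the names label the two columns of the plot):
#
#   perm1 = pd.Series(["x", "y", "z"], index=[1, 2, 3], name="Method A")
#   perm2 = pd.Series(["y", "x", "z"], index=[1, 2, 3], name="Method B")
#   spider3(perm1, perm2, file="spider.png")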
| {"hexsha": "35336938a5f1e15a25bad542c30c12f7543e95dc", "size": 15181, "ext": "py", "lang": "Python", "max_stars_repo_path": "pyrankability/plot.py", "max_stars_repo_name": "IGARDS/ranking_toolbox", "max_stars_repo_head_hexsha": "98e2d318c76c92d91bb2c0481efe9879cd3614db", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pyrankability/plot.py", "max_issues_repo_name": "IGARDS/ranking_toolbox", "max_issues_repo_head_hexsha": "98e2d318c76c92d91bb2c0481efe9879cd3614db", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2022-02-07T19:56:51.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-07T20:03:58.000Z", "max_forks_repo_path": "pyrankability/plot.py", "max_forks_repo_name": "IGARDS/ranking_toolbox", "max_forks_repo_head_hexsha": "98e2d318c76c92d91bb2c0481efe9879cd3614db", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.5074946467, "max_line_length": 149, "alphanum_fraction": 0.5627429023, "include": true, "reason": "import numpy,import networkx,from networkx", "num_tokens": 4536} |
\documentclass[12pt,english]{article}
\usepackage{mathptmx}
\usepackage{color}
\usepackage[dvipsnames]{xcolor}
\definecolor{darkblue}{RGB}{0,0,139}
\usepackage[top=1in, bottom=1in, left=1in, right=1in]{geometry}
\usepackage{amsmath}
\usepackage{amstext}
\usepackage{amssymb}
\usepackage{setspace}
\usepackage{lipsum}
\usepackage[authoryear]{natbib}
\usepackage{url}
\usepackage{booktabs}
\usepackage[flushleft]{threeparttable}
\usepackage{graphicx}
\usepackage[english]{babel}
\usepackage{pdflscape}
\usepackage[unicode=true,pdfusetitle,
bookmarks=true,bookmarksnumbered=false,bookmarksopen=false,
breaklinks=true,pdfborder={0 0 0},backref=false,
colorlinks,citecolor=black,filecolor=black,
linkcolor=black,urlcolor=black]
{hyperref}
\usepackage[all]{hypcap} % Links point to top of image, builds on hyperref
\usepackage{breakurl} % Allows urls to wrap, including hyperref
\linespread{2}
\begin{document}
\begin{singlespace}
\title{Machine Learning for Sample Selection Models}
\end{singlespace}
\author{Owen McDevitt\thanks{Department of Economics, University of Oklahoma.\
E-mail~address:~\href{mailto:omcdevitt1022@ou.edu}{omcdevitt1022@ou.edu}}}
% \date{\today}
\date{May 9, 2019}
\maketitle
\begin{abstract}
The existing literature on sample selection models, and problems thereof, is extensive and expanding. However, little has been discussed regarding the use of machine learning as a tool to correct selection bias. Despite this lack of literature, the predictive capabilities of many machine learning algorithms make them an ideal candidate for the task. This paper offers both a primer on the topic and a demonstration of its potential use. To explore the topic, I employ simulated data sets: (1) a large, unbiased, normally distributed reference set as the population, and (2) a biased subset of the population that is "observed". I correct for selection bias in the observed data using each observation's predicted probability of selection as determined by four different classifiers: neural net, random forest, naive bayes, and logit regression. I then compare the effectiveness of each classifier in recovering the true value of $\beta_1$ in the corrected model. Though the neural net is most computationally expensive, it ultimately provides the best correction - slightly better than the standard approach of using a logit model. Overall, the results are promising, and they affirm the potential that machine learning has in application to sample selection models. However, more extensive simulation studies are necessary to make any definitive conclusions regarding its utility in practice.
\end{abstract}
\section{Introduction}
Beginning with the seminal work of James Heckman \cite{heckman_1979}, economists have produced an ever-growing literature on sample selection bias. Sample selection bias occurs when studying a population of data using a sample that is not wholly representative, that is, a non-random subsample. The literature provides strategies that combat this bias under a variety of assumptions and in a variety of contexts. However, the use of machine learning as a tool for correcting selection bias has yet to be seriously explored. Because machine learning models are often exceptional predictors, they provide an ideal candidate for the task. By exploiting this predictive accuracy, one can obtain the probability that a given observation will be "selected" or not. The conditional probability of selection given a vector of observed covariates is known as the propensity score \cite{rosenbaum_rubin_1983}. This predicted probability can then be included in a regression with the observed data, and this corrected regression model ultimately addresses the bias. I specifically examine the use of four different classifiers in obtaining the predicted probabilities: a Neural Net, a Random Forest, a Naive Bayes classifier, and a logistic regression. The standard approach uses a logistic regression, so I include it for comparison's sake.
\par
To test the effectiveness of the classifiers, I use simulated data based on a sample selection model. First, I simulate a data set from an outcome equation to be used as the population, and then I subset this population according to a selection equation. This subset contains data that is "observed". Due to the non-random sub-setting of the population, the observed data is systematically biased. Thus a linear model using this data produces an incorrect coefficient on the regressor of interest. Therefore, using this model to study the population as a whole will lead to erroneous conclusions. The dangers of failing to make this correction are evidenced by the following canonical example: studying wages.
\par
When studying potential wages of an entire population, selection bias is nearly impossible to avoid. For example, if one has a data set that only contains information for those who are employed, then any study that does not correct for the selection bias will only be able to draw conclusions about wages conditional on employment - not wages in general. Assume that we are studying the effect of education on wages. Also, assume that a given worker chooses to work if his/her wages are above a certain level (reservation wage). In this case, high-education individuals will be well represented because the wage offer they receive will likely be sufficient. However, low education individuals are more likely to be offered wages below their reservation wage. Thus, the only low education individuals that appear in the data set are the ones who are earning sufficiently high wages. Therefore, education is dependent on the error term, causing the effect of education on wages to be biased downward due to a proportion of low education individuals self-selecting out of the workforce. That is, selection into the workforce for low education workers is dependent on the wage offer.
\par
Keeping this in mind, one can see that sufficiently addressing selection bias is vital to making reliable conclusions. The difference between observing wages and observing all potential wage offers is subtle yet paramount. Fortunately, there are many ways to correct for this bias. In this paper, I use classifiers to obtain the predicted probability that a given observation is selected. I then incorporate these probabilities of being selected, or propensity scores, into the model experiencing selection bias. By controlling for the variables that determine selection, I essentially remove the effect of selection from the error term. Thus, it allows us to draw robust conclusions with regard to the entire population and therefore capture the "true" coefficient on the regressor of interest. That is, the value that I assigned for $\beta_1$ in the outcome equation. This whole process is shown in greater detail in the methods section.
\par
Because selection bias is so pervasive, having a flexible, effective, and easy-to-implement method for correction is essential. Machine Learning as a tool for this correction provides all three of the aforementioned qualities. Most importantly, it is comparatively stronger than most existing methods with regard to flexibility, since it does not necessarily rely on as many assumptions.
\section{Literature Review}
The most well-known method for correcting selection bias is the Heckman correction. By viewing the selection bias as an omitted variable problem, one can control for the dependence between the regressor of interest and the error term using a two-step control function \cite{heckman_1979}. The "omitted variable" in this case is $(\epsilon_i \mid \eta_i > -\gamma_1 Z_i)$. Essentially, this is the error conditional on whether or not someone is selected. By incorporating this back into the model using the inverse Mills ratio, one is able to correct for the bias.
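For reference, under joint normality of the errors with $\operatorname{corr}(\epsilon_i,\eta_i)=\rho$ and $\eta_i$ standardized, this omitted term has the standard closed form
\begin{equation*}
E[\epsilon_i \mid \eta_i > -\gamma_1 Z_i] = \rho\,\sigma_\epsilon\,\lambda(\gamma_1 Z_i), \qquad \lambda(c) = \frac{\phi(c)}{\Phi(c)},
\end{equation*}
where $\phi$ and $\Phi$ denote the standard normal density and distribution function and $\lambda(\cdot)$ is the inverse Mills ratio that Heckman's second step appends to the outcome equation.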
\par
However, Heckman's solution relies on distributional assumptions for the error terms, specifically that they are jointly normal. When these assumptions are not met, the solution is inconsistent and misleading \cite{goldberger_1983}. Additionally, the correction is inefficient when there exists a correlation between the error term and the selection mechanism \cite{puhani_2000}. Since then, the literature has expanded to weaken the assumptions of Heckman's original solution and improve its applicability and performance.
\par
Another popular method for correcting selection bias is propensity score matching (PSM) \cite{rosenbaum_rubin_1983}. PSM estimates the probability of being treated from each observation's observed covariates and then matches non-treated observations with treated ones. However, PSM implicitly assumes that all factors impacting selection and the regressor of interest have already been observed. Moreover, it relies on data being available for the non-treated individuals.
\par
The most popular method for estimating propensity scores is logit regression \cite{austin_2011}. This paper also estimates the predicted probability of being treated with a logit regression, though primarily as a comparative baseline for the machine learning methods. Despite logistic regression being standard, the use of machine learning to estimate propensity scores has been shown to be effective. In a simulation study comparing a Neural Net to a logistic regression in a propensity score matching model, the Neural Net was found to produce less numerically biased estimates \cite{setoguchi_schneeweiss_brookhart_glynn_cook_2008}. However, up until now, no extensive simulation study has been done on the topic as a whole.
\par
On a broader scale, machine learning is increasingly contributing to economic literature, especially where it relates to econometrics and causal inference \cite{athey_2018}. Though the aim of machine learning is fundamentally different than that of traditional econometrics, its predictive strength alone makes it useful in a variety of contexts. Machine learning has been applied to many areas including regression discontinuity, difference-in-differences, structural modeling or individual and firm behavior, and more. Lastly, recent research by Chernozhukov has been particularly instrumental in marrying the two fields - causal inference and machine learning \cite{newey_hansen_duflo_robins_chernozhukov_demirer_chetverikov_2017}.
\par
Following the lead of similar simulation studies, this paper begins to overview the application of machine learning as it pertains to sample selection models. Eventually, it may serve as a guide to empirical use. This will hopefully contribute to the machine learning/causal inference literature. As of now however, this paper only examines a single context and thus fails to offer a complete view of the topic.
\section{Data}
In order to test the performance of the different classifiers, I simulate a sample selection model. This model includes two simulated data sets: first, a large data set (N = 100000) of unbiased data following a normal distribution, and then a smaller, biased subset of that data chosen according to a selection equation. The larger data set represents the population, and the smaller data set represents the individuals which are observed. On average, half of the total population is observed. The following equations are used for the simulation. Equation (1) is the outcome equation, and equation (2) is the selection equation. Equation (3) uses the selection equation to determine whether a given individual is observed.
The simulated sample selection model can be formalized in the following way:
\begin{equation}
Y_i = \beta_0 + \beta_1 X_i + \epsilon_i
\end{equation}
\begin{equation}
U_i = \gamma_0 + \gamma_1 Z_i + \eta_i
\end{equation}
\begin{equation}
d_i =
\begin{cases}
0 & \text{if } U_i \leq 0 \\
1 & \text{otherwise}
\end{cases}
\end{equation}
For simplicity, $\beta_0$ and $\gamma_0$ are assigned values of zero. Thus, there is no intercept on the models. Also, I assign a value of $-1$ to $\beta_1$. This is the coefficient on our regressor of interest. When I correct for the selection bias, I am trying to obtain this value. Additionally, $X_i$ and $Z_i$ each follow a normal distribution such that $X_i, Z_i \sim \mathcal{N}(0,\,\sigma^{2})$. Also, $\eta$ and $\epsilon$ follow a bivariate normal distribution, such that
\begin{equation*}
\begin{pmatrix}
\epsilon\\
\eta
\end{pmatrix}
\sim N\!\left[
\begin{pmatrix}
0\\
0
\end{pmatrix}\!,\;
\begin{pmatrix}
1 & 0\\
0 & \sigma^2
\end{pmatrix}
\right]
\end{equation*}
\par
First, I simulate the population data set of 100000 observations according to the outcome and selection equations. Then, I add a column of treatment values, such that observations are assigned a value of $d=0$ when $U_i \leq 0$ and a value of $d=1$ otherwise. Lastly, I subset this data, removing observations where $d=0$. This results in a biased data set with regards to the relationship between $Y_i$ and $X_i$. The model with only observed data is as follows:
\begin{equation}
Obs Y_i = \beta_0 + \beta_1 X_i + \epsilon_i
\end{equation}
As evidenced by Table 1 in the appendix, this model results in a biased value of $\beta_1$. The linear model with observed data (4) has $\beta_1 = -0.812$, while the linear model with population data (1) has $\beta_1 = -1.001$. Summary statistics for the two data sets are also available in the appendix.
\section{Methods}
In our simulated model, $U_i$ measures the tendency to be selected and $Y_i$ measures the outcome we want to study. Thus, we observe the outcome of any given data point only if the selection variable is positive. We ultimately want to study the relationship between $Y_i$ and $X_i$. However, we cannot yet directly observe this. What we do observe is $E[ Y | X_i = X, Z_i = Z, d = 1]$, or, our outcome variable conditional on selection. This can be rewritten as $\beta_1 X_i + E[\epsilon | \eta > -\gamma Z_i]$. Thus, the expectation of our outcome consists of two factors: the regressor of interest and the expected error caused by selection. Therefore, by controlling for the error caused by selection, we are able to achieve an accurate measure of $\beta_1$.
\par
In order to control for this error, I obtain the predicted probability of being selected for each observation in the data, that is, $P(d = 1 | X_i = X, Z_i = Z)$. By incorporating this predicted probability in the model of observed data, the effect of selection is essentially removed from the error term.
\par
When incorporating these probabilities, I use the utility-maximizing probability. That is, given $d=1$, I use $p = P(d=1)$, and given $d=0$, I use $p = 1- P(d=1)$. Additionally, to capture more than just a linear relationship, I incorporate the predicted probabilities as a flexible function. I include $p$, $p^2$, and $p^3$. Thus, our corrected model stands as follows:
\begin{equation}
Obs Y_i = \beta_0 + \beta_1 X_i + \beta_2 p_i + \beta_3 p_i^2 + \beta_4 p_i^3 + \epsilon_i
\end{equation}
\par
As mentioned, I obtain the predicted probabilities using four different classifiers. I will briefly overview each of the three machine learning methods that I use.
\par
The random forest classifier essentially averages the predictions of many decision trees. Decision trees attempt to minimize error by splitting the data into increasingly small sub-samples. When no tree depth is specified (as is the case with our model), the data is continually split until the sub samples are maximally pure, that is, all observations in the sub-sample fall into a single category. This often results in overfitting. However, the random forest corrects for this by averaging the predictions over many trees - each tree run on a redrawn sample of the population.
\par
The next classifier I use is a neural network. The neural net begins with inputs ($X_i$ and $Z_i$ in our case) and, by continually adjusting how the model is weighted, reaches the output ($d_i$). It achieves this by passing through "layers" that perform transformations on the inputs, ultimately creating some non-linear function mapping the inputs to the output. In addition, I perform cross-validation on my neural net.
\par
Lastly, I use a naive Bayes classifier, which simply uses Bayes' theorem to calculate the posterior probability of each potential outcome ($d_i$) and then chooses the outcome with the greatest probability. The naive Bayes classifier is simple and thus computationally inexpensive.
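Concretely, the classifier forms the posterior
\begin{equation*}
P(d_i = 1 \mid X_i, Z_i) = \frac{P(X_i, Z_i \mid d_i = 1)\,P(d_i = 1)}{P(X_i, Z_i)},
\end{equation*}
where the naive conditional-independence assumption factors the likelihood as $P(X_i, Z_i \mid d_i) = P(X_i \mid d_i)\,P(Z_i \mid d_i)$, and each univariate density is estimated from the training data.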
\par
In order to compare the effectiveness of the classifiers, I simply look at which ones result in a model with a $\beta_1$ closest to the true value when incorporated in the corrected model. I also run the classifiers several times and compare their computational efficiency.
\section{Results}
When running the corrected models, all four classifiers are able to recover a value of $\beta_1$ that is within 0.1 of the true value. However, the neural net and logistic regression performed generally better than the other two classifiers, with the neural net performing best. The results of the corrected regressions using machine learning are contained in Table 2 of the appendix: (1) is the Naive Bayes, (2) is the neural net, and (3) is the Random Forest. Also contained in Table 3 of the appendix are the results from the logistic regression correction.
\par
I ran each of the four algorithms 100 times. A violin plot containing the computation times for each one is shown in the appendix as well. These plots show the probability density of computation times for each method. The neural net was the most computationally expensive, and the random forest was close behind. The logistic regression and naive bayes did not come with a significant computation cost.
\par
The results tentatively confirm that machine learning - especially neural nets - offer an effective alternative in correcting for selection bias. Moreover, the machine learning algorithms rely on relatively few assumptions. Therefore their application is not as constrained, and it offers a more flexible solution to selection bias relative to existing methods. However, because the standard approach of using logistic regression is only marginally outperformed by the much more computationally expensive neural net, there may not be immediate justification for its use. This paper is limited by the fact that corrections were only tested in a single context. Thus, it is difficult to generalize the findings.
\section{Conclusion}
This research offers a preliminary look at the use of machine learning in sample selection models. Though the results are promising, more research is needed to make definitive conclusions. However, this paper tentatively concludes that using a neural net classifier results in the best correction. Moreover, the use of a random forest or naive Bayes classifier removes a portion of the selection bias but is not nearly as effective.
\par
Future research can increase the number of classifiers, the number of scenarios each classifier is tested under (what assumptions are made), and the number of simulations being run. Some more potential thoughts to examine in closer detail are: an IV or many IVs being included in the model, varying dimensionality of X, varying strength of the selection bias, and varying portion of the population being selected.
\par
Looking at how machine learning classifiers perform under the aforementioned conditions will allow us to gain a clearer idea of where it belongs in application. However, I do believe it will ultimately prove to be empirically useful.
\pagebreak{}
\begin{spacing}{1.0}
\bibliographystyle{jpe}
\bibliography{FinalProject.bib}
\addcontentsline{toc}{section}{References}
\end{spacing}
\pagebreak{}
% The appendix command is issued once, prior to all appendices, if any.
\appendix
\section{Mathematical Appendix}
\begin{table}[!htbp] \centering
\caption{}
\label{}
\begin{tabular}{@{\extracolsep{5pt}}lcc}
\\[-1.8ex]\hline
\hline \\[-1.8ex]
& \multicolumn{2}{c}{\textit{Dependent variable:}} \\
\cline{2-3}
\\[-1.8ex] & Y & obsY \\
\\[-1.8ex] & (1) & (2)\\
\hline \\[-1.8ex]
X & $-$1.001$^{***}$ & $-$0.812$^{***}$ \\
& (0.002) & (0.004) \\
& & \\
Constant & $-$0.002 & 0.319$^{***}$ \\
& (0.002) & (0.004) \\
& & \\
\hline \\[-1.8ex]
Observations & 100,000 & 49,755 \\
R$^{2}$ & 0.665 & 0.493 \\
Adjusted R$^{2}$ & 0.665 & 0.493 \\
Residual Std. Error & 0.709 (df = 99998) & 0.661 (df = 49753) \\
F Statistic & 198,902.500$^{***}$ (df = 1; 99998) & 48,327.610$^{***}$ (df = 1; 49753) \\
\hline
\hline \\[-1.8ex]
\textit{Note:} & \multicolumn{2}{r}{$^{*}$p$<$0.1; $^{**}$p$<$0.05; $^{***}$p$<$0.01} \\
\end{tabular}
\end{table}
\begin{table}[!htbp] \centering
\caption{}
\label{}
\begin{tabular}{@{\extracolsep{5pt}}lccc}
\\[-1.8ex]\hline
\hline \\[-1.8ex]
& \multicolumn{3}{c}{\textit{Dependent variable:}} \\
\cline{2-4}
\\[-1.8ex] & \multicolumn{3}{c}{obsY} \\
\\[-1.8ex] & (1) & (2) & (3)\\
\hline \\[-1.8ex]
X & $-$1.084$^{***}$ & $-$1.002$^{***}$ & $-$0.958$^{***}$ \\
& (0.005) & (0.004) & (0.004) \\
& & & \\
poly(pred.1st.best, 3)1 & $-$100.676$^{***}$ & $-$91.238$^{***}$ & $-$77.010$^{***}$ \\
& (1.282) & (1.031) & (1.010) \\
& & & \\
poly(pred.1st.best, 3)2 & 23.641$^{***}$ & 9.305$^{***}$ & 4.745$^{***}$ \\
& (0.892) & (0.884) & (0.898) \\
& & & \\
poly(pred.1st.best, 3)3 & $-$3.153$^{***}$ & $-$8.495$^{***}$ & $-$8.300$^{***}$ \\
& (0.886) & (0.881) & (0.892) \\
& & & \\
Constant & 0.161$^{***}$ & 0.210$^{***}$ & 0.236$^{***}$ \\
& (0.004) & (0.004) & (0.004) \\
& & & \\
\hline \\[-1.8ex]
Observations & 49,930 & 49,930 & 49,930 \\
R$^{2}$ & 0.569 & 0.571 & 0.553 \\
Adjusted R$^{2}$ & 0.569 & 0.571 & 0.553 \\
Residual Std. Error (df = 49925) & 0.610 & 0.609 & 0.622 \\
F Statistic (df = 4; 49925) & 16,458.040$^{***}$ & 16,585.240$^{***}$ & 15,415.450$^{***}$ \\
\hline
\hline \\[-1.8ex]
\textit{Note:} & \multicolumn{3}{r}{$^{*}$p$<$0.1; $^{**}$p$<$0.05; $^{***}$p$<$0.01} \\
\end{tabular}
\end{table}
\begin{table}[!htbp] \centering
\caption{}
\label{}
\begin{tabular}{@{\extracolsep{5pt}}lc}
\\[-1.8ex]\hline
\hline \\[-1.8ex]
& \multicolumn{1}{c}{\textit{Dependent variable:}} \\
\cline{2-2}
\\[-1.8ex] & obsY \\
\hline \\[-1.8ex]
X & $-$1.004$^{***}$ \\
& (0.004) \\
& \\
poly(pred.1st.best, 3)1 & $-$91.509$^{***}$ \\
& (1.029) \\
& \\
poly(pred.1st.best, 3)2 & 6.666$^{***}$ \\
& (0.888) \\
& \\
poly(pred.1st.best, 3)3 & $-$10.254$^{***}$ \\
& (0.885) \\
& \\
Constant & 0.209$^{***}$ \\
& (0.004) \\
& \\
\hline \\[-1.8ex]
Observations & 49,930 \\
R$^{2}$ & 0.570 \\
Adjusted R$^{2}$ & 0.570 \\
Residual Std. Error & 0.609 (df = 49925) \\
F Statistic & 16,577.450$^{***}$ (df = 4; 49925) \\
\hline
\hline \\[-1.8ex]
\textit{Note:} & \multicolumn{1}{r}{$^{*}$p$<$0.1; $^{**}$p$<$0.05; $^{***}$p$<$0.01} \\
\end{tabular}
\end{table}
\begin{figure}
\includegraphics[width=\linewidth]{Violin.png}
\caption{Computational Efficiency}
\label{fig:boat1}
\end{figure}
\end{document}
| {"hexsha": "a06b6997caf91be150a0a02583fcca22d72c7cd9", "size": 23031, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "FinalProject/Final.Project/FinalProject.tex", "max_stars_repo_name": "omcdevi/DScourseS19", "max_stars_repo_head_hexsha": "87522d20227fcbf934762339dbe9d247b09203e0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "FinalProject/Final.Project/FinalProject.tex", "max_issues_repo_name": "omcdevi/DScourseS19", "max_issues_repo_head_hexsha": "87522d20227fcbf934762339dbe9d247b09203e0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "FinalProject/Final.Project/FinalProject.tex", "max_forks_repo_name": "omcdevi/DScourseS19", "max_forks_repo_head_hexsha": "87522d20227fcbf934762339dbe9d247b09203e0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 81.670212766, "max_line_length": 1395, "alphanum_fraction": 0.7536798228, "num_tokens": 5887} |
import numpy as np
import tensorflow as tf
from config import cfg
epsilon = 1e-9
class CapsLayer(object):
''' Capsule layer.
Args:
input: A 4-D tensor.
num_outputs: the number of capsule in this layer.
vec_len: integer, the length of the output vector of a capsule.
layer_type: string, one of 'FC' or "CONV", the type of this layer,
fully connected or convolution, for the future expansion capability
with_routing: boolean, this capsule is routing with the
lower-level layer capsule.
Returns:
A 4-D tensor.
'''
def __init__(self, num_outputs, vec_len, with_routing=True, layer_type='FC'):
self.num_outputs = num_outputs
self.vec_len = vec_len
self.with_routing = with_routing
self.layer_type = layer_type
def __call__(self, input, kernel_size=None, stride=None):
'''
The parameters 'kernel_size' and 'stride' will be used while 'layer_type' equal 'CONV'
'''
if self.layer_type == 'CONV':
self.kernel_size = kernel_size
self.stride = stride
if not self.with_routing:
# the PrimaryCaps layer, a convolutional layer
# input: [batch_size, 20, 20, 256]
assert input.get_shape() == [cfg.batch_size, 20, 20, 256]
'''
# version 1, computational expensive
capsules = []
for i in range(self.vec_len):
# each capsule i: [batch_size, 6, 6, 32]
with tf.variable_scope('ConvUnit_' + str(i)):
caps_i = tf.contrib.layers.conv2d(input, self.num_outputs,
self.kernel_size, self.stride,
padding="VALID", activation_fn=None)
caps_i = tf.reshape(caps_i, shape=(cfg.batch_size, -1, 1, 1))
capsules.append(caps_i)
assert capsules[0].get_shape() == [cfg.batch_size, 1152, 1, 1]
capsules = tf.concat(capsules, axis=2)
'''
# version 2, equivalent to version 1 but higher computational
# efficiency.
# NOTE: I can't find out any words from the paper whether the
# PrimaryCap convolution does a ReLU activation before
# squashing function. So, which one to use will be your choice
# capsules = tf.contrib.layers.conv2d(input, self.num_outputs * self.vec_len,
# self.kernel_size, self.stride,padding="VALID",
# activation_fn=tf.nn.relu)
capsules = tf.contrib.layers.conv2d(input, self.num_outputs * self.vec_len,
self.kernel_size, self.stride,padding="VALID",
activation_fn=None)
capsules = tf.reshape(capsules, (cfg.batch_size, -1, self.vec_len, 1))
# [batch_size, 1152, 8, 1]
capsules = squash(capsules)
assert capsules.get_shape() == [cfg.batch_size, 1152, 8, 1]
return(capsules)
if self.layer_type == 'FC':
if self.with_routing:
# the DigitCaps layer, a fully connected layer
# Reshape the input into [batch_size, 1152, 1, 8, 1]
self.input = tf.reshape(input, shape=(cfg.batch_size, -1, 1, input.shape[-2].value, 1))
with tf.variable_scope('routing'):
# b_IJ: [1, num_caps_l, num_caps_l_plus_1, 1, 1]
b_IJ = tf.constant(np.zeros([1, input.shape[1].value, self.num_outputs, 1, 1], dtype=np.float32))
capsules = routing(self.input, b_IJ)
capsules = tf.squeeze(capsules, axis=1)
return(capsules)
def routing(input, b_IJ):
''' The routing algorithm.
Args:
input: A Tensor with [batch_size, num_caps_l=1152, 1, length(u_i)=8, 1]
shape, num_caps_l meaning the number of capsule in the layer l.
Returns:
A Tensor of shape [batch_size, num_caps_l_plus_1, length(v_j)=16, 1]
representing the vector output `v_j` in the layer l+1
Notes:
u_i represents the vector output of capsule i in the layer l, and
v_j the vector output of capsule j in the layer l+1.
'''
# W: [num_caps_j, num_caps_i, len_u_i, len_v_j]
W = tf.get_variable('Weight', shape=(1, 1152, 10, 8, 16), dtype=tf.float32,
initializer=tf.random_normal_initializer(stddev=cfg.stddev))
# Eq.2, calc u_hat
# do tiling for input and W before matmul
# input => [batch_size, 1152, 10, 8, 1]
# W => [batch_size, 1152, 10, 8, 16]
input = tf.tile(input, [1, 1, 10, 1, 1])
W = tf.tile(W, [cfg.batch_size, 1, 1, 1, 1])
assert input.get_shape() == [cfg.batch_size, 1152, 10, 8, 1]
# in last 2 dims:
# [8, 16].T x [8, 1] => [16, 1] => [batch_size, 1152, 10, 16, 1]
u_hat = tf.matmul(W, input, transpose_a=True)
assert u_hat.get_shape() == [cfg.batch_size, 1152, 10, 16, 1]
# line 3,for r iterations do
for r_iter in range(cfg.iter_routing):
with tf.variable_scope('iter_' + str(r_iter)):
# line 4:
# => [1, 1152, 10, 1, 1]
c_IJ = tf.nn.softmax(b_IJ, dim=2)
c_IJ = tf.tile(c_IJ, [cfg.batch_size, 1, 1, 1, 1])
assert c_IJ.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
# line 5:
# weighting u_hat with c_IJ, element-wise in the last two dims
# => [batch_size, 1152, 10, 16, 1]
s_J = tf.multiply(c_IJ, u_hat)
# then sum in the second dim, resulting in [batch_size, 1, 10, 16, 1]
s_J = tf.reduce_sum(s_J, axis=1, keep_dims=True)
assert s_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]
# line 6:
# squash using Eq.1,
v_J = squash(s_J)
assert v_J.get_shape() == [cfg.batch_size, 1, 10, 16, 1]
# line 7:
            # reshape & tile v_j from [batch_size, 1, 10, 16, 1] to [batch_size, 1152, 10, 16, 1]
            # then matmul in the last two dims: [16, 1].T x [16, 1] => [1, 1], and reduce-sum over
            # the batch_size dim, resulting in [1, 1152, 10, 1, 1]
v_J_tiled = tf.tile(v_J, [1, 1152, 1, 1, 1])
u_produce_v = tf.matmul(u_hat, v_J_tiled, transpose_a=True)
assert u_produce_v.get_shape() == [cfg.batch_size, 1152, 10, 1, 1]
b_IJ += tf.reduce_sum(u_produce_v, axis=0, keep_dims=True)
return(v_J)
def squash(vector):
'''Squashing function corresponding to Eq. 1
Args:
vector: A 5-D tensor with shape [batch_size, 1, num_caps, vec_len, 1],
Returns:
        A 5-D tensor with the same shape as vector but squashed along the vec_len dimension (Eq. 1).
'''
vec_squared_norm = tf.reduce_sum(tf.square(vector), -2, keep_dims=True)
scalar_factor = vec_squared_norm / (1 + vec_squared_norm) / tf.sqrt(vec_squared_norm + epsilon)
vec_squashed = scalar_factor * vector # element-wise
return(vec_squashed)
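# Behavioural note on squash (Eq. 1): for an input vector v with squared norm
# s = |v|^2, the output keeps the direction of v but rescales its norm to
# s / (1 + s), i.e. close to 0 for short vectors and approaching 1 for long
# ones, which is what lets capsule lengths be read as probabilities.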
# TODO: 1. Test the `fully_connected` and `conv2d` function;
# 2. Update docs about these two function.
def fully_connected(inputs,
num_outputs,
vec_len,
with_routing=True,
weights_initializers=tf.contrib.layers.xavier_initializer(),
reuse=None,
variable_collections=None,
scope=None):
'''A capsule fully connected layer.(Note: not tested yet)
Args:
inputs: A tensor of as least rank 3, i.e. `[batch_size, num_inputs, vec_len]`,
`[batch_size, num_inputs, vec_len, 1]`.
num_outputs: ...
Returns:
...
Raise:
...
'''
layer = CapsLayer(num_outputs=num_outputs,
vec_len=vec_len,
with_routing=with_routing,
layer_type='FC')
    # CapsLayer defines __call__ rather than an apply() method.
    return layer(inputs)
def conv2d(inputs,
filters,
vec_len,
kernel_size,
strides=(1, 1),
with_routing=False,
reuse=None):
'''A capsule convolutional layer.(Note: not tested yet)
Args:
inputs: A tensor.
Returns:
...
Raises:
...
'''
layer = CapsLayer(num_outputs=filters,
vec_len=vec_len,
with_routing=with_routing,
layer_type='CONV')
return(layer(inputs, kernel_size=kernel_size, stride=strides))
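# Minimal sketch (untested here) of how these layers are typically composed in
# a CapsNet-style graph. Assumes `images` is a [cfg.batch_size, 28, 28, 1]
# tensor and `cfg` is the same config module imported above; the 9x9 VALID
# convolution yields the [batch_size, 20, 20, 256] input the PrimaryCaps
# layer asserts on.
#
#   conv1 = tf.contrib.layers.conv2d(images, num_outputs=256,
#                                    kernel_size=9, stride=1, padding='VALID')
#   primaryCaps = CapsLayer(num_outputs=32, vec_len=8,
#                           with_routing=False, layer_type='CONV')
#   caps1 = primaryCaps(conv1, kernel_size=9, stride=2)   # -> [batch, 1152, 8, 1]
#   digitCaps = CapsLayer(num_outputs=10, vec_len=16,
#                         with_routing=True, layer_type='FC')
#   caps2 = digitCaps(caps1)                               # -> [batch, 10, 16, 1]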
| {"hexsha": "7cae31de45ba96c2017229378a48af620a8f180c", "size": 8856, "ext": "py", "lang": "Python", "max_stars_repo_path": "capsLayer.py", "max_stars_repo_name": "crjramsden/CapsuleAI", "max_stars_repo_head_hexsha": "3d04a76259c72b647cf63c4dbad7c173c3851005", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "capsLayer.py", "max_issues_repo_name": "crjramsden/CapsuleAI", "max_issues_repo_head_hexsha": "3d04a76259c72b647cf63c4dbad7c173c3851005", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "capsLayer.py", "max_forks_repo_name": "crjramsden/CapsuleAI", "max_forks_repo_head_hexsha": "3d04a76259c72b647cf63c4dbad7c173c3851005", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.0, "max_line_length": 117, "alphanum_fraction": 0.5485546522, "include": true, "reason": "import numpy", "num_tokens": 2267} |
# ----------------------------------------------------------------------------
# test_hsi11.py
#
# Copyright 2021 Daniel Tisza
# MIT License
#
# Taking an image with HSI, changed to use the mvIMPACT Python binding
#
# ----------------------------------------------------------------------------
from __future__ import print_function
import os
import platform
import sys
from mvIMPACT import acquire
from mvIMPACT.Common import exampleHelper
import ctypes
import numpy as np
import datetime as dt
import matplotlib
from LEDDriver import detect_LED_devices, LEDDriver, LEDException
from spectracular.fpi_driver import detectFPIDevices, createFPIDevice
import fpipy as fp
import fpipy.conventions as c
import xarray as xr
from tqdm.autonotebook import tqdm, trange
#-----------------------------------------
# LED driver
#-----------------------------------------
LED_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of LED devices."""
LED_HWIDS = [
# Strings returned by read_hardware_id
'1000e016 aefba123 580267dd f5001982',
'10025018 af28a028 5a66a511 f5001983'
]
ledportdevice = detect_LED_devices()
print(ledportdevice)
ledportstring = '/dev/ttyACM1'
# ledportstring = 'COM10'
print('Trying to use ' + ledportstring + ' for LED control')
# led = LEDDriver('/dev/ttyACM0')
# led = LEDDriver('COM10')
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning off LEDs')
led.L(0)
led.close()
#-----------------------------------------
# Camera
#-----------------------------------------
print("Creating device manager")
devMgr = acquire.DeviceManager()
print("Asking user to select device")
pDev = exampleHelper.getDeviceFromUserInput(devMgr)
if pDev == None:
exampleHelper.requestENTERFromUser()
sys.exit(-1)
pDev.open()
#
# Set system settings
#
# RequestCount 10
#
ss = acquire.SystemSettings(pDev)
print("Old RequestCount:")
print(ss.requestCount.readS())
#
# Basic device settings
#
bdc = acquire.BasicDeviceSettings(pDev)
print("Old ImageRequestTimeout_ms:")
print(bdc.imageRequestTimeout_ms.readS())
#
# Set camera settings
#
# AcquisitionMode SingleFrame
# TriggerSource Line1
# TriggerMode Off
#
ac = acquire.AcquisitionControl(pDev)
print("Old AcquisitionMode:")
print(ac.acquisitionMode.readS())
print("New AcquisitionMode:")
ac.acquisitionMode.writeS("SingleFrame")
print(ac.acquisitionMode.readS())
print("Old TriggerMode:")
print(ac.triggerMode.readS())
# print("New TriggerMode:")
# ac.triggerMode.writeS("On")
# print(ac.triggerMode.readS())
print("Old TriggerSource:")
print(ac.triggerSource.readS())
# print("New TriggerSource:")
# ac.triggerSource.writeS("Software")
# print(ac.triggerSource.readS())
print("Old ExposureAuto:")
print(ac.exposureAuto.readS())
print("New ExposureAuto:")
ac.exposureAuto.writeS("Off")
print(ac.exposureAuto.readS())
ifc = acquire.ImageFormatControl(pDev)
print("Old pixelformat:")
print(ifc.pixelFormat.readS())
print("New pixelformat:")
ifc.pixelFormat.writeS("BayerGB12")
# ifc.pixelFormat.writeS("RGB8")
print(ifc.pixelFormat.readS())
print("Old pixelColorFilter:")
print(ifc.pixelColorFilter.readS())
imgp = acquire.ImageProcessing(pDev)
# "Auto" originally
print("Old colorProcessing:")
print(imgp.colorProcessing.readS())
imgp.colorProcessing.writeS("Raw")
print("New colorProcessing:")
print(imgp.colorProcessing.readS())
print("Old ExposureTime:")
print(ac.exposureTime.readS())
print("New ExposureTime:")
ac.exposureTime.writeS("150000")
print(ac.exposureTime.readS())
anlgc = acquire.AnalogControl(pDev)
print("Old BalanceWhiteAuto:")
print(anlgc.balanceWhiteAuto.readS())
print("New BalanceWhiteAuto:")
anlgc.balanceWhiteAuto.writeS("Off")
print(anlgc.balanceWhiteAuto.readS())
print("Old Gamma:")
print(anlgc.gamma.readS())
print("New Gamma:")
anlgc.gamma.writeS("1")
print(anlgc.gamma.readS())
print("Old Gain:")
print(anlgc.gain.readS())
print("New Gain:")
anlgc.gain.writeS("1.9382002601")
print(anlgc.gain.readS())
print("Old GainAuto:")
print(anlgc.gainAuto.readS())
print("New GainAuto:")
anlgc.gainAuto.writeS("Off")
print(anlgc.gainAuto.readS())
#-----------------------------------------
# MFPI
#-----------------------------------------
FPI_IDS = [
# ( VID, PID) (and the same in decimal)
('1FC9', '0083'), (8137, 131),
]
"""Known VID:PID pairs of FPI devices."""
FPI_HWIDS = [
# Strings returned by read_hardware_id
'd02b012 af380065 5b5bbeab f50019c1'
]
print('Trying to create FPI device')
fpi = createFPIDevice(detectFPIDevices(FPI_IDS, FPI_HWIDS)[0].device)
print(fpi)
# ------------------------------------------
# camazing.pixelformats
# ------------------------------------------
class PixelFormatError(Exception):
pass
def get_valid_range(pxformat):
"""Return the valid range of values for a given pixel format.
Parameters
----------
pxformat: str
Pixel format as given by cameras GenICam PixelFormat feature.
Returns
------
np.array
A vector of [min_value, max_value] with the same type as the decoded
pixel format.
"""
try:
valid_range = _ranges[pxformat]
except KeyError:
        raise PixelFormatError(f'No range found for the pixel format `{pxformat}`')
return valid_range
def get_decoder(pxformat):
"""Return a numpy decoder for a given GenICam pixel format.
Parameters
----------
pxformat: str
Pixel format as given by cameras PixelFormat.
Returns
-------
decoder: function
Function for decoding a buffer
"""
try:
decoder = _decoders[pxformat]
except KeyError:
raise PixelFormatError(f'No decoder for the pixel format `{pxformat}`')
return decoder
def decode_raw(dtype):
"""Decode raw buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=dtype
).reshape(*shape).copy()
return decode
def decode_RGB(bpp):
"""Decode RGB buffer with a given bit depth."""
def decode(buf, shape):
return np.frombuffer(
buf,
dtype=bpp,
).reshape(*shape, 3).copy()
return decode
def decode_YCbCr422_8():
"""Decode YCbCr422 buffer with given bit depth."""
raise NotImplementedError
_decoders = {
'BayerRG8': decode_raw(np.uint8),
'BayerGB8': decode_raw(np.uint8),
'BayerGB12': decode_raw(np.uint16),
'BayerRG12': decode_raw(np.uint16),
'BayerRG16': decode_raw(np.uint16),
'RGB8': decode_RGB(np.uint8),
'Mono8': decode_raw(np.uint8),
'Mono16': decode_raw(np.uint16),
}
_ranges = {
'BayerRG8': np.uint8([0, 255]),
'BayerGB8': np.uint8([0, 255]),
'BayerGB12': np.uint16([0, 4095]),
'BayerRG12': np.uint16([0, 4095]),
'BayerRG16': np.uint16([0, 65535]),
'RGB8': np.uint8([0, 255]),
'Mono8': np.uint8([0, 255]),
'Mono16': np.uint16([0, 65535]),
}
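# Usage sketch: the decoder returned for a pixel format turns a raw GenICam
# buffer into a numpy array with the matching dtype, and get_valid_range()
# gives the corresponding [min, max] values (names and shapes below are
# illustrative assumptions):
#
#   decoder = get_decoder('BayerGB12')
#   frame = decoder(raw_buffer, (height, width))   # uint16 array, values 0..4095
#   lo, hi = get_valid_range('BayerGB12')          # [0, 4095]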
# ------------------------------------------
# camazing.core
# ------------------------------------------
class DanielCamera:
def __init__(self, pDev):
self._meta = None
self._pDev = pDev
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
print("Exit DanielCamera")
def _get_frame(self, timeout=1):
"""Helper function"""
self._pixel_format = "BayerGB12"
self._buffer_decoder = get_decoder(self._pixel_format)
self._image_range = get_valid_range(self._pixel_format)
# data = self._buffer_decoder(buffer.raw_buffer, (height, width))
#------------------------
# Take frame
#------------------------
self._fi = acquire.FunctionInterface(pDev)
self._fi.imageRequestReset(0,0)
self._fi.imageRequestSingle()
exampleHelper.manuallyStartAcquisitionIfNeeded(self._pDev, self._fi)
requestNr = self._fi.imageRequestWaitFor(20000)
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
data = []
if self._fi.isRequestNrValid(requestNr):
print("Request number valid! " + str(requestNr))
pRequest = self._fi.getRequest(requestNr)
print("Print request: " + str(pRequest))
print("Print request result: " + str(pRequest.requestResult))
print("Print request result readS: " + pRequest.requestResult.readS())
if pRequest.isOK:
print("Request OK!")
height = pRequest.imageHeight.read()
width = pRequest.imageWidth.read()
channelCount = pRequest.imageChannelCount.read()
channelBitDepth = pRequest.imageChannelBitDepth.read()
imageSize = pRequest.imageSize.read()
print("Image height: " + str(height))
print("Image width: " + str(width))
print("Image channel count: " + str(channelCount))
print("Image channel bit depth: " + str(channelBitDepth))
print("Image size: " + str(imageSize))
cbuf = (ctypes.c_char * pRequest.imageSize.read()).from_address(int(pRequest.imageData.read()))
# Check if this is now correct buffer format!
# Convert with numpy if needed
data = self._buffer_decoder(cbuf, (height, width))
print("Data from buffer_decoder()")
print(data)
self._fi.imageRequestUnlock(requestNr)
exampleHelper.manuallyStopAcquisitionIfNeeded(pDev, self._fi)
self._fi.imageRequestReset(0,0)
else:
print("imageRequestWaitFor failed (" + str(requestNr) + ", " + acquire.ImpactAcquireException.getErrorCodeAsString(requestNr) + ")")
exampleHelper.manuallyStopAcquisitionIfNeeded(self._pDev, self._fi)
return data
def _get_frame_with_meta(self):
"""Fetch a frame and add metadata from the camera."""
data = self._get_frame()
print("Data from _get_frame(): ")
print(data)
height, width = data.shape[0], data.shape[1]
coords = {
"x": ("x", np.arange(0, width) + 0.5),
"y": ("y", np.arange(0, height) + 0.5),
"timestamp": dt.datetime.today().timestamp(),
}
if 'RGB' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('RGB')
elif 'YUV' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = list('YUV')
elif 'YCbCr' in self._pixel_format:
dims = ('y', 'x', 'colour')
coords['colour'] = ['Y', 'Cb', 'Cr']
else:
dims = ('y', 'x')
# Keep some meta by default, if available
# self._meta = []
# for feature in ['Gain', 'ExposureTime', 'PixelFormat', 'PixelColorFilter']:
# if feature in self._features:
# self._meta.append(feature)
# Add metadata as coordinates
# if self._meta:
# coords.update({k: self._features[k].value for k in self._meta})
# Replace these hard-coded values by reading from camera!
coords['Gain'] = "1.9382002601"
coords['ExposureTime'] = 150000
coords['PixelFormat'] = "BayerGB12"
coords['PixelColorFilter'] = "BayerGB"
frame = xr.DataArray(
data,
name="frame",
dims=dims,
coords=coords,
attrs={
'valid_range': self._image_range,
}
)
return frame
def get_frame(self):
return self._get_frame_with_meta()
# ------------------------------------------
# HSI
# ------------------------------------------
class CaptureException(Exception):
pass
class HSI:
"""Hyperspectral imager"""
def __init__(self, camera=None, fpi=None):
self.camera = camera
self.fpi = fpi
self.dataset = None
self.calibration_file = None
def read_calibration_file(self, calibration_file):
self.dataset = fp.io.read_calibration(calibration_file)
self.calibration_file = calibration_file
def take_dark_reference(self, number_of_frames=40, method="median"):
self.read_calibration_file(self.calibration_file)
# original_trigger_source = self.camera["TriggerSource"].value
# self.camera["TriggerSource"].value = "Software"
frames = []
with self.camera:
for idx in trange(0, number_of_frames):
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# self.camera["TriggerSource"].value = original_trigger_source
dark = xr.concat(frames, dim=c.image_index)
if method == "median":
dark = dark.median(dim=c.image_index)
elif method == "mean":
dark = dark.mean(dim=c.image_index)
else:
raise ValueError("Unknown method: '" + method)
self.dataset[c.dark_reference_data] = dark
return dark
def capture_cube(self, *, selectors=None):
if selectors is None:
dataset = self.dataset.copy()
else:
dataset = self.dataset.sel(**selectors).copy()
frames = []
# if self.camera["TriggerSource"].value == "Software":
with self.camera:
for idx in tqdm(dataset[c.image_index].values):
setpoint = dataset[c.setpoint_data].sel(
**{c.setpoint_coord: "SP1",
c.image_index: idx,
}).values
self.fpi.set_setpoint(setpoint, wait=True)
frame = self.camera.get_frame()
frame.coords[c.image_index] = idx
frames.append(frame)
# else:
# with self.camera:
# self.create_fpi_taskfile(dataset)
# self.camera["StrobeDuration"].value = \
# self.camera["ExposureTime"].value
# self.fpi.run_taskfile()
# for idx, setpoint in enumerate(tqdm(
# dataset.setpoint.sel(setpoint_index="SP1").values)):
# frame = self.camera.get_frame()
# frame.coords[c.image_index] = idx
# frames.append(frame)
dataset[c.cfa_data] = xr.concat(frames, dim=c.image_index)
return dataset
def create_fpi_taskfile(dataset):
raise NotImplementedError()
danielCam = DanielCamera(pDev)
print(danielCam)
hsi = HSI(danielCam, fpi)
print(hsi)
hsi.read_calibration_file('led_set_a_calib_single.txt')
input("Put the lens cap on")
# hsi.take_dark_reference()
# Reduce dark frame count from 40 to 2,
# because Zybo does not have enough memory
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning off LEDs')
led.L(0)
led.close()
hsi.take_dark_reference(2)
print(hsi.dataset.dark)
input("Take the lens cap off and set white reference")
print('Turning on LEDs')
# VIS
#
# 542,8327583
# 552,8525817
#
# 701,3626464
# 710,1310492
#
# 111000000111000000111000000
# * Reverse for LED control:
# 000000111000000111000000111
#
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning on LEDs')
led.L(0b000000111000000111000000111)
led.close()
print('Capturing white reference')
white_raw = hsi.capture_cube()
input("Set image (only for radiance)")
print('Capturing cube')
raw = hsi.capture_cube()
print(raw)
input("Waiting for keypress to turn off LEDs")
print('Turning off LEDs')
led = LEDDriver(ledportstring)
print(led)
led.open()
print('Turning off LEDs')
led.L(0)
led.close()
# print('Calculating radiance')
# rad = fp.raw_to_radiance(raw, keep_variables=['dark'])
# print(rad)
# print(rad['radiance'])
#
# print('Calculating white radiance')
# rad['white'] = fp.raw_to_radiance(white_raw, keep_variables = []).radiance
# print(rad['white'])
#
# print('Calculating reflectance')
# rad['reflectance'] = rad.radiance / rad.white
# print(rad['reflectance'])
#
# # reflectance = fp.radiance_to_reflectance(rad, white_raw, keep_variables=[])
# # print(reflectance)
#
# print('Extracting single frame from cube and saving to PNG')
# test = rad["radiance"]
#
# print('Radiance data')
# testdata = test.data
# print(testdata)
#
# print('White data')
# whitedata = rad['white'].data
# print(whitedata)
#
# print('Reflectance data')
# reflectdata = rad['reflectance'].data
# print(reflectdata)
#
# print ("Wavelengths")
# wavelengths = rad["wavelength"].data
# print(wavelengths)
#
# print ("Wavelengths count")
# wavelengthCount = len(wavelengths)
# print(wavelengthCount)
#
# # Multiple peaks result in multiple of single calib file row count
# imagelastindex = wavelengthCount
#
# #
# # Save radiance images
# #
# print('Start saving radiance images')
# for x in range(0, imagelastindex):
#
# wavelengthValue = wavelengths[x]
# wavelengthStr = str(wavelengthValue)
# wavelengthReplacedStr = wavelengthStr.replace(".", "p")
# print('Saving wavelength: ' + wavelengthStr)
#
# rad1 = testdata[:,:,x]
# # matplotlib.image.imsave('rad_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', rad1)
#
# white1 = whitedata[:,:,x]
# # matplotlib.image.imsave('white_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', white1)
#
# ref1 = reflectdata[:,:,x]
# matplotlib.image.imsave('refl_' + wavelengthReplacedStr + 'nm_' + str(x) + '.png', ref1, vmin=0,vmax=1)
import matplotlib.image
import matplotlib.pyplot as plt
plt.gray()
#
# Save raw images and demosaic images
#
print('Start saving raw data')
for x in range(0, 1):
# Raw data values
dn1 = raw.dn.isel(index=x)
matplotlib.image.imsave('raw_' + str(x) + '.png', dn1)
# Demosaic to get three colour channels
# dm1 = fp.demosaic(dn1, 'BayerGB', 'bilinear')
# dm1_red = dm1[:,:,0]
# dm1_green = dm1[:,:,1]
# dm1_blue = dm1[:,:,2]
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_red.png', dm1_red)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_green.png', dm1_green)
# matplotlib.image.imsave('raw_' + str(x) + '_demosaic_blue.png', dm1_blue)
# fi.acquisitionStart()
# self["TriggerSoftware"].execute()
# acquire.TriggerControl.triggerSoftware()
# fi.acquisitionStop()
| {"hexsha": "e6c5edd0851be5ac42774998940e51cf36072dfe", "size": 18433, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/zybohsi/test_hsi11.py", "max_stars_repo_name": "DanielTisza/spectralcamera", "max_stars_repo_head_hexsha": "4fef93b3b4cd8f83e016070f1c0d68aa0cff5102", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/zybohsi/test_hsi11.py", "max_issues_repo_name": "DanielTisza/spectralcamera", "max_issues_repo_head_hexsha": "4fef93b3b4cd8f83e016070f1c0d68aa0cff5102", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/zybohsi/test_hsi11.py", "max_forks_repo_name": "DanielTisza/spectralcamera", "max_forks_repo_head_hexsha": "4fef93b3b4cd8f83e016070f1c0d68aa0cff5102", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.5988455988, "max_line_length": 144, "alphanum_fraction": 0.6153637498, "include": true, "reason": "import numpy", "num_tokens": 4583} |
import argparse
import numpy as np
import torch
import yaml
from torch.utils.data import DataLoader
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from model import LanguageModel
from dataset import CoNLLDataset, collate_annotations
FLAGS = None
def main(_):
# Load configuration.
with open(FLAGS.config, 'r') as f:
        config = yaml.safe_load(f)
# Initialize CoNLL dataset.
dataset = CoNLLDataset(fname=config['data']['train'], target='lm')
# Initialize model.
language_model = LanguageModel(
vocab_size=len(dataset.token_vocab),
embedding_dim=config['model']['embedding_dim'],
hidden_size=config['model']['hidden_size'],
num_layers=config['model']['num_layers'])
if torch.cuda.is_available():
language_model = language_model.cuda()
# Initialize loss function. NOTE: Manually setting weight of padding to 0.
weight = torch.ones(len(dataset.token_vocab))
weight[0] = 0
if torch.cuda.is_available():
weight = weight.cuda()
loss_function = torch.nn.NLLLoss(weight)
optimizer = torch.optim.Adam(language_model.parameters())
# Main training loop.
data_loader = DataLoader(
dataset,
batch_size=config['training']['batch_size'],
shuffle=True,
collate_fn=collate_annotations)
losses = []
i = 0
for epoch in range(config['training']['num_epochs']):
for batch in data_loader:
inputs, targets, lengths = batch
optimizer.zero_grad()
outputs, _ = language_model(inputs, lengths=lengths)
outputs = outputs.view(-1, len(dataset.token_vocab))
targets = targets.view(-1)
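            # Note (added): flattening outputs to (N, vocab) and targets to (N,)
            # lets NLLLoss treat every token position as one classification;
            # padded positions contribute nothing because their class weight is 0 above.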
loss = loss_function(outputs, targets)
loss.backward()
optimizer.step()
            losses.append(loss.item())
if (i % 100) == 0:
average_loss = np.mean(losses)
losses = []
print('Iteration %i - Loss: %0.6f' % (i, average_loss), end='\r')
if (i % 1000) == 0:
torch.save(language_model, config['data']['checkpoint'])
i += 1
torch.save(language_model, config['data']['checkpoint'])
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True,
help='Path to configuration file.')
FLAGS, _ = parser.parse_known_args()
main(_)
| {"hexsha": "b975820b54544011a3cf9c9ca812def1281769d3", "size": 2477, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorials/rnn-examples/train_lm.py", "max_stars_repo_name": "SamuelPhang/uci-statnlp", "max_stars_repo_head_hexsha": "799e95daa1cbdfe00f501b3068df24eb90c0d351", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2017-01-19T21:36:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-07T05:49:01.000Z", "max_issues_repo_path": "tutorials/rnn-examples/train_lm.py", "max_issues_repo_name": "SamuelPhang/uci-statnlp", "max_issues_repo_head_hexsha": "799e95daa1cbdfe00f501b3068df24eb90c0d351", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/rnn-examples/train_lm.py", "max_forks_repo_name": "SamuelPhang/uci-statnlp", "max_forks_repo_head_hexsha": "799e95daa1cbdfe00f501b3068df24eb90c0d351", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 82, "max_forks_repo_forks_event_min_datetime": "2017-01-22T00:06:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-07T11:45:48.000Z", "avg_line_length": 30.9625, "max_line_length": 81, "alphanum_fraction": 0.623334679, "include": true, "reason": "import numpy", "num_tokens": 532} |
import sys
import os
import pyflann
import params
import numpy as np
import draw_func2 as df2
import helpers
np.random.seed(5)
# Parameters
tdim = 2  # Target viewing dimensions
dim = 2   # Calculation dimension
if len(sys.argv) == 2:
    tdim = int(sys.argv[1])
    dim = int(sys.argv[1])
K = 4
checks = 128
nQuery = 8
nData = 1024
# Script
def quick_flann_index(data):
data_flann = pyflann.FLANN()
flann_params = params.VSMANY_FLANN_PARAMS
checks = flann_params['checks']
data_flann.build_index(data, **flann_params)
return data_flann
def reciprocal_nearest_neighbors(query, data, data_flann, checks):
nQuery, dim = query.shape
# Assign query features to K nearest database features
(qfx2_dx, qfx2_dists) = data_flann.nn_index(query, K, checks=checks)
# Assign those nearest neighbors to K nearest database features
qx2_nn = data[qfx2_dx]
qx2_nn.shape = (nQuery*K, dim)
(_nn2_dx, nn2_dists) = data_flann.nn_index(qx2_nn, K, checks=checks)
# Get the maximum distance of the reciprocal neighbors
nn2_dists.shape = (nQuery, K, K)
qfx2_maxdist = nn2_dists.max(2)
# Test if nearest neighbor distance is less than reciprocal distance
isReciprocal = qfx2_dists < qfx2_maxdist
return qfx2_dx, qfx2_dists, isReciprocal
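# Hedged toy check (added; not part of the original script): the k-reciprocal rule
# keeps a neighbor only if the query falls inside that neighbor's own k-NN radius.
# The underscored names are illustrative only.
_toy_qfx2_dists = np.array([[0.2, 0.9]])     # query -> its 2 nearest data points
_toy_qfx2_maxdist = np.array([[0.5, 0.5]])   # each data point's max distance to its own 2-NN
assert ((_toy_qfx2_dists < _toy_qfx2_maxdist) == np.array([[True, False]])).all()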
data = np.random.rand(nData, dim)
query = np.random.rand(nQuery, dim)
nQuery = len(query)
# Find query's Nearest Neighbors in data
data_flann = quick_flann_index(data)
(qfx2_dx, qfx2_dists) = data_flann.nn_index(query, K, checks=checks)
qx2_nn = data[qfx2_dx]
# get k-reciprocal nearest neighbors max distance
qx2_nn.shape = (nQuery*K, dim)
(nn2_dx, nn2_dists) = data_flann.nn_index(qx2_nn, K, checks=checks)
nn2_data = data[nn2_dx] # data's nearest neighbors
nn2_dists.shape = (nQuery, K, K)
qx2_nn.shape = (nQuery, K, dim)
qfx2_maxdist = nn2_dists.max(2)
# A neighbor is a K reciprocal if you are within the
# max distance of the assigned points K nearest neighbors
isReciprocal = qfx2_dists < qfx2_maxdist
krx2_nn = qx2_nn[isReciprocal]
krx2_qfx = helpers.tiled_range(nQuery, K)[isReciprocal]
krx2_query = query[krx2_qfx]
# Enforce viewable dimensionality
if dim != tdim:
import sklearn.decomposition
print('Plotting pca.transform dimensionality')
pca = sklearn.decomposition.PCA(copy=True, n_components=tdim, whiten=False)
pca.fit(data)
query_ = pca.transform(query)
data_ = pca.transform(data)
nn2_data_ = pca.transform(nn2_data)
qx2_nn_ = pca.transform(qx2_nn)
krx2_query_ = pca.transform(krx2_query)
krx2_nn_ = pca.transform(krx2_nn)
else:
print('Plotting full dimensionality')
query_ = (query)
data_ = (data)
qx2_nn_ = (qx2_nn)
krx2_query_ = (krx2_query)
krx2_nn_ = (krx2_nn)
# Figure and Axis
plt = df2.plt
df2.reset()
fig = plt.figure(1)
if tdim == 2:
ax = fig.add_subplot(111)
elif tdim > 2:
from mpl_toolkits.mplot3d import Axes3D
ax = fig.add_subplot(111, projection='3d')
def plot_points(data, color, marker):
dataT = data.T
if len(dataT) == 2:
ax.plot(dataT[0], dataT[1], color=color, marker=marker, linestyle='None')
elif len(dataT) == 3:
ax.scatter(dataT[0], dataT[1], dataT[2], color=color, marker=marker)
def plot_lines(point_pairs, color):
for pair in point_pairs:
dataT = pair.T
if len(dataT) == 2:
ax.plot(dataT[0], dataT[1], color=color)
elif len(dataT) == 3:
ax.plot(dataT[0], dataT[1], dataT[2], color=color)
#plt.scatter(dataT[0], dataT[1], dataT[2], s=20, color=color)
# Plot query / data
plot_points(data_, 'b', 'x')
plot_points(query_,'b', 'o')
# Plot KNN
qx2_nn_.shape = (nQuery, K, tdim)
point_pairs = [np.vstack((query_[qx], qx2_nn_[qx,k])) for qx in range(nQuery) for k in range(K)]
plot_lines(point_pairs, (1, 0, 0, .8))
# Plot NN's KNN
qx2_nn_.shape = (nQuery*K, tdim)
nRes = len(qx2_nn_)
point_pairs3 = [np.vstack((qx2_nn_[nnx], nn2_data_[nnx,k])) for nnx in range(nRes) for k in range(K)]
plot_lines(point_pairs3, (1, .8, .8, .5))
# Plot KRNN
point_pairs2 = map(np.vstack, zip(krx2_query_, krx2_nn_))
plot_lines(point_pairs2, (0, 1, 0, .9))
df2.update()
# Show
df2.set_figtitle('KRNN=(Green), NN=(Red), NNR=(Pink), dims=%r, K=%r' % (dim, K))
exec(df2.present())
| {"hexsha": "351ec4cc9af99e6ff463624cae8f19671ea65acd", "size": 4308, "ext": "py", "lang": "Python", "max_stars_repo_path": "_scripts/k_reciprocal_nearest_neighbors.py", "max_stars_repo_name": "Erotemic/hotspotter", "max_stars_repo_head_hexsha": "3cfa4015798e21385455b937f9083405c4b3cf53", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2015-07-19T02:55:06.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-07T02:38:26.000Z", "max_issues_repo_path": "_scripts/k_reciprocal_nearest_neighbors.py", "max_issues_repo_name": "Erotemic/hotspotter", "max_issues_repo_head_hexsha": "3cfa4015798e21385455b937f9083405c4b3cf53", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2017-03-11T16:30:26.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-10T16:42:10.000Z", "max_forks_repo_path": "_scripts/k_reciprocal_nearest_neighbors.py", "max_forks_repo_name": "Erotemic/hotspotter", "max_forks_repo_head_hexsha": "3cfa4015798e21385455b937f9083405c4b3cf53", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-07-19T03:05:42.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-24T14:48:59.000Z", "avg_line_length": 32.3909774436, "max_line_length": 103, "alphanum_fraction": 0.6901114206, "include": true, "reason": "import numpy", "num_tokens": 1406} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/12/23 10:01 AM
# @Author : zhangzhen
# @Site :
# @File : torch_tutorial.py
# @Software: PyCharm
import torch
import math
import numpy as np
import torch.nn.functional as F
from torch.autograd import Variable
import matplotlib.pyplot as plt
def act_functions():
"""
draw the activation function
:return:
"""
# generate data
x = torch.linspace(-5, 5, 200)
x = Variable(x)
x_np = x.data.numpy()
y_relu = F.relu(x).data.numpy()
    y_sigmoid = torch.sigmoid(x).data.numpy()
    y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()
plt.figure(1, figsize=(10, 8))
# sub fig -- relu
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim(-1, 5)
plt.legend(loc='best')
# sub fig -- sigmoid
plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='blue', label='sigmoid')
plt.ylim(-0.2, 1.2)
plt.legend(loc='best')
# sub fig -- tanh
plt.subplot(223)
plt.plot(x_np, y_tanh, c='green', label='tanh')
plt.ylim(-1.2, 1.2)
plt.legend(loc='best')
plt.subplot(224)
plt.plot(x_np, y_softplus, c='cyan', label='softplus')
plt.ylim(-0.2, 6)
plt.legend(loc='best')
plt.show()
def loss_functions():
# head
plt.figure(figsize=(20, 16))
plt.title(u"损失函数")
# sub fig -- sigmoid
plt.subplot(331)
    x_sig = np.linspace(start=-15, stop=15, dtype=float)
loss_sigmoid = 1 / (1 + np.exp(-x_sig))
plt.plot(x_sig, loss_sigmoid, c='red', label='sigmoid')
plt.ylim(-0.2, 1.2)
plt.legend(loc='best')
plt.grid()
# sub fig -- logistics
plt.subplot(332)
    x_logi = np.linspace(start=-5, stop=10, dtype=float)
loss_logistics = np.log((1 + np.exp(-x_logi))) / np.log(2)
plt.plot(x_logi, loss_logistics, c='blue', label='logistics')
plt.ylim(-0.5, 8)
plt.legend(loc='best')
plt.grid()
# sub fig -- boost
plt.subplot(333)
    x_boost = np.linspace(start=-3, stop=10, dtype=float)
loss_boost = np.exp(-x_boost)
plt.plot(x_boost, loss_boost, c='cyan', label='boost')
plt.ylim(-0.2, 15)
plt.legend(loc='best')
plt.grid()
# sub fig -- 0/1
plt.subplot(334)
    x_0_1 = np.linspace(start=-10, stop=10, num=1001, dtype=float)
loss_0_1 = x_0_1 < 0
plt.plot(x_0_1, loss_0_1, c='olive', label='0/1')
plt.ylim(-0.2, 1.2)
plt.legend(loc='best')
plt.grid()
# sub fig -- hinge
plt.subplot(335)
    x_hinge = np.linspace(-5, 10, num=1001, dtype=float)
loss_hinge = 1.0 - x_hinge
loss_hinge[loss_hinge < 0] = 0
plt.plot(x_hinge, loss_hinge, c='navy', label='hinge')
plt.ylim(-0.2, 5)
plt.legend(loc='best')
plt.grid()
# sub fig -- mse and mae
plt.subplot(336)
    x_mse_mae = np.linspace(-2.5, 2.5, num=1001, dtype=float)
loss_mse = np.square(x_mse_mae)
loss_mae = np.abs(x_mse_mae)
plt.plot(x_mse_mae, loss_mse, c='yellowgreen', label='mse')
plt.plot(x_mse_mae, loss_mae, c='tan', label='mae')
plt.ylim(-0.2, 4)
plt.legend(loc='upper right')
plt.grid()
# sub fig -- log cosh
plt.subplot(337)
    x_log_cosh = np.linspace(-4, 4, num=1001, dtype=float)
    loss_log_cosh = np.log2(np.cosh(x_log_cosh))
    plt.plot(x_log_cosh, np.cos(x_log_cosh), c='olivedrab', label='cos')
    plt.plot(x_log_cosh, np.cosh(x_log_cosh), c='maroon', label='cosh')
    plt.plot(x_log_cosh, loss_log_cosh, c='palegreen', label='logcosh')
plt.ylim(-1.5, 10)
plt.legend(loc='upper right')
plt.grid()
# sub fig -- huber
plt.subplot(338)
    x_huber = np.linspace(-100, 100, num=1001, dtype=float)
plt.plot(x_huber, np.square(x_huber) / 2, c='violet', label='squared loss', lw=2)
for d in (10, 5, 3, 2, 1):
plt.plot(x_huber, (abs(x_huber) <= d) * x_huber ** 2 / 2 + (abs(x_huber) > d) * d * (abs(x_huber) - d / 2), label=r'huber loss: $\delta$={}'.format(d), lw=2)
plt.ylim(-1, 1000)
plt.legend(loc='upper right')
plt.grid()
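    # Note (added): the family plotted above is the Huber loss,
    #   L_d(a) = a^2 / 2 if |a| <= d, else d * (|a| - d / 2),
    # which is exactly what the vectorized expression in the plot call evaluates.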
# sub fig -- all loss function
plt.subplot(339)
x = np.linspace(-2, 3, 1001, dtype=float)
plt.plot(x, np.log((1 + np.exp(-x))) / np.log(2), 'r--', label='Logistics Loss', lw=2)
plt.plot(x, np.exp(-x), 'k-', label='Adaboost Loss', lw=1)
plt.plot(x, x < 0, 'y-', label='0/1 Loss', lw=1)
tmp_hinge = 1.0 - x
tmp_hinge[tmp_hinge < 0] = 0
plt.plot(x, tmp_hinge, 'b-', label='Hinge Loss', lw=1)
plt.legend(loc='best')
plt.grid()
# save
plt.savefig("loss_function.png")
# show
plt.show()
if __name__ == '__main__':
# act_functions()
loss_functions()
| {"hexsha": "271e15451543bb08b2bf002df319d15cca225fb7", "size": 4696, "ext": "py", "lang": "Python", "max_stars_repo_path": "dl_tutorials/torch_tutorial.py", "max_stars_repo_name": "learnerzhang/AnalyticsVidhya", "max_stars_repo_head_hexsha": "697689a24a9d73785164512cab8ac4ee5494afe8", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-07-04T09:14:26.000Z", "max_stars_repo_stars_event_max_datetime": "2018-07-04T09:14:26.000Z", "max_issues_repo_path": "dl_tutorials/torch_tutorial.py", "max_issues_repo_name": "learnerzhang/AnalyticsVidhya", "max_issues_repo_head_hexsha": "697689a24a9d73785164512cab8ac4ee5494afe8", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "dl_tutorials/torch_tutorial.py", "max_forks_repo_name": "learnerzhang/AnalyticsVidhya", "max_forks_repo_head_hexsha": "697689a24a9d73785164512cab8ac4ee5494afe8", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.1445086705, "max_line_length": 165, "alphanum_fraction": 0.5973168654, "include": true, "reason": "import numpy", "num_tokens": 1608} |
/* -*- Mode: C++; tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- */
/*
* Copyright 2018 Couchbase, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#pragma once
#include "engine_error.h"
#include "types.h"
#include <gsl/gsl>
namespace cb {
namespace audit {
namespace document {
enum class Operation;
}
} // namespace audit
} // namespace cb
struct ServerDocumentIface {
virtual ~ServerDocumentIface() = default;
/**
* This callback is called from the underlying engine right before
* it is linked into the list of available documents (it is currently
* not visible to anyone). The engine should have validated all
* properties set in the document by the client and the core, and
* assigned a new CAS number for the document (and sequence number if
* the underlying engine use those).
*
* The callback may at this time do post processing of the document
* content (it is allowed to modify the content data, but not
* reallocate or change the size of the data in any way).
*
* Given that the engine MAY HOLD LOCKS when calling this function
* the core is *NOT* allowed to acquire *ANY* locks (except for doing
* some sort of memory allocation for a temporary buffer).
*
     * @param cookie The cookie provided to the engine for the storage
     *               command (which may hold more context)
* @param info the items underlying data
* @return ENGINE_SUCCESS means that the underlying engine should
* proceed to link the item. All other
* error codes means that the engine should
* *NOT* link the item
*/
virtual ENGINE_ERROR_CODE pre_link(gsl::not_null<const void*> cookie,
item_info& info) = 0;
/**
* This callback is called from the underlying engine right before
     * a particular document expires. The callback is responsible for examining
* the value and possibly returning a new and modified value.
*
* @param itm_info info pertaining to the item that is to be expired.
     * @return std::string empty if the value requires no modification;
     *         otherwise the string contains the modified value. When not empty,
     *         the datatype of the new value is xattr only.
*
* @throws std::bad_alloc in case of memory allocation failure
*/
virtual std::string pre_expiry(const item_info& itm_info) = 0;
/**
* Add an entry to the audit trail for access to the document specified
* in the key for this cookie.
*
* @param cookie The cookie representing the operation
* @param operation The type of access for the operation
*/
virtual void audit_document_access(
gsl::not_null<const void*> cookie,
cb::audit::document::Operation operation) = 0;
};
| {"hexsha": "d6ea0c7208fa1f3c304a500243ce2b008586588a", "size": 3478, "ext": "h", "lang": "C", "max_stars_repo_path": "include/memcached/server_document_iface.h", "max_stars_repo_name": "hrajput89/kv_engine", "max_stars_repo_head_hexsha": "33fb1ab2c9787f55555e5f7edea38807b3dbc371", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2019-06-13T07:33:09.000Z", "max_stars_repo_stars_event_max_datetime": "2019-06-13T07:33:09.000Z", "max_issues_repo_path": "include/memcached/server_document_iface.h", "max_issues_repo_name": "paolococchi/kv_engine", "max_issues_repo_head_hexsha": "40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/memcached/server_document_iface.h", "max_forks_repo_name": "paolococchi/kv_engine", "max_forks_repo_head_hexsha": "40256dca6bf77fb4bcc18e8ef7d9b8f991bf4e45", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2019-10-11T14:00:49.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-06T09:20:15.000Z", "avg_line_length": 39.5227272727, "max_line_length": 80, "alphanum_fraction": 0.6627372053, "num_tokens": 758} |
import numpy as np
from util import util
from config.draco3_lb_config import PnCConfig, WBCConfig
from pnc.wbc.ihwbc.ihwbc import IHWBC
from pnc.wbc.ihwbc.joint_integrator import JointIntegrator
class Draco3LBController(object):
def __init__(self, tci_container, robot):
self._tci_container = tci_container
self._robot = robot
# Initialize WBC
l_jp_idx, l_jd_idx, r_jp_idx, r_jd_idx = self._robot.get_q_dot_idx(
['l_knee_fe_jp', 'l_knee_fe_jd', 'r_knee_fe_jp', 'r_knee_fe_jd'])
act_list = [False] * robot.n_floating + [True] * robot.n_a
act_list[l_jd_idx] = False
act_list[r_jd_idx] = False
n_q_dot = len(act_list)
n_active = np.count_nonzero(np.array(act_list))
n_passive = n_q_dot - n_active - 6
self._sa = np.zeros((n_active, n_q_dot))
self._sv = np.zeros((n_passive, n_q_dot))
j, k = 0, 0
for i in range(n_q_dot):
if i >= 6:
if act_list[i]:
self._sa[j, i] = 1.
j += 1
else:
self._sv[k, i] = 1.
k += 1
self._sf = np.zeros((6, n_q_dot))
self._sf[0:6, 0:6] = np.eye(6)
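        # Note (added): _sa, _sv, and _sf are selection matrices picking out the
        # actuated, passive (the *_jd knee joints excluded above), and floating-base
        # components of the generalized velocities for the whole-body controller.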
self._ihwbc = IHWBC(self._sf, self._sa, self._sv, PnCConfig.SAVE_DATA)
if WBCConfig.B_TRQ_LIMIT:
self._ihwbc.trq_limit = np.dot(self._sa[:, 6:],
self._robot.joint_trq_limit)
self._ihwbc.lambda_q_ddot = WBCConfig.LAMBDA_Q_DDOT
self._ihwbc.lambda_rf = WBCConfig.LAMBDA_RF
# Initialize Joint Integrator
self._joint_integrator = JointIntegrator(robot.n_a,
PnCConfig.CONTROLLER_DT)
self._joint_integrator.pos_cutoff_freq = WBCConfig.POS_CUTOFF_FREQ
self._joint_integrator.vel_cutoff_freq = WBCConfig.VEL_CUTOFF_FREQ
self._joint_integrator.max_pos_err = WBCConfig.MAX_POS_ERR
self._joint_integrator.joint_pos_limit = self._robot.joint_pos_limit
self._joint_integrator.joint_vel_limit = self._robot.joint_vel_limit
self._b_first_visit = True
def get_command(self):
if self._b_first_visit:
self.first_visit()
# Dynamics properties
mass_matrix = self._robot.get_mass_matrix()
mass_matrix_inv = np.linalg.inv(mass_matrix)
coriolis = self._robot.get_coriolis()
gravity = self._robot.get_gravity()
self._ihwbc.update_setting(mass_matrix, mass_matrix_inv, coriolis,
gravity)
# Task, Contact, and Internal Constraint Setup
w_hierarchy_list = []
for task in self._tci_container.task_list:
task.update_jacobian()
task.update_cmd()
w_hierarchy_list.append(task.w_hierarchy)
self._ihwbc.w_hierarchy = np.array(w_hierarchy_list)
for contact in self._tci_container.contact_list:
contact.update_contact()
for internal_constraint in self._tci_container.internal_constraint_list:
internal_constraint.update_internal_constraint()
# WBC commands
joint_trq_cmd, joint_acc_cmd, rf_cmd = self._ihwbc.solve(
self._tci_container.task_list, self._tci_container.contact_list,
self._tci_container.internal_constraint_list)
joint_trq_cmd = np.dot(self._sa[:, 6:].transpose(), joint_trq_cmd)
joint_acc_cmd = np.dot(self._sa[:, 6:].transpose(), joint_acc_cmd)
# Double integration
joint_vel_cmd, joint_pos_cmd = self._joint_integrator.integrate(
joint_acc_cmd, self._robot.joint_velocities,
self._robot.joint_positions)
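        # Note (added): the joint integrator turns the commanded accelerations into
        # velocity and position references by double integration at the controller
        # time step, subject to the cutoff frequencies and limits configured above.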
command = self._robot.create_cmd_ordered_dict(joint_pos_cmd,
joint_vel_cmd,
joint_trq_cmd)
return command
def first_visit(self):
joint_pos_ini = self._robot.joint_positions
self._joint_integrator.initialize_states(np.zeros(self._robot.n_a),
joint_pos_ini)
self._b_first_visit = False
| {"hexsha": "f713ea8db528032ad33727686296727e18b4ed76", "size": 4232, "ext": "py", "lang": "Python", "max_stars_repo_path": "pnc/draco3_lb_pnc/draco3_lb_controller.py", "max_stars_repo_name": "MaxxWilson/ASE389Project", "max_stars_repo_head_hexsha": "13c3c72887e27fbed2eef63c1e27b4a185036a39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2021-05-31T10:55:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:09:37.000Z", "max_issues_repo_path": "pnc/draco3_lb_pnc/draco3_lb_controller.py", "max_issues_repo_name": "MaxxWilson/ASE389Project", "max_issues_repo_head_hexsha": "13c3c72887e27fbed2eef63c1e27b4a185036a39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-10-01T22:11:43.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T02:34:33.000Z", "max_forks_repo_path": "pnc/draco3_lb_pnc/draco3_lb_controller.py", "max_forks_repo_name": "MaxxWilson/ASE389Project", "max_forks_repo_head_hexsha": "13c3c72887e27fbed2eef63c1e27b4a185036a39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-08-24T00:53:18.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T17:29:07.000Z", "avg_line_length": 41.900990099, "max_line_length": 80, "alphanum_fraction": 0.6176748582, "include": true, "reason": "import numpy", "num_tokens": 1038} |
\chapter{Results}\label{chap:results}
Clustering for \bslong{} and investigating the \krap{} led us to focus on a range of metrics in order to understand the nature of the data.
For clustering, we were interested in both the nature of the clusters --- the size, purity, and unique \spec{} within each --- and the clustering coverage --- how many \cplop{} \isols{} made it into a cluster, given a specific \minneigh{} value.
For \krap{}, we adjusted the values of \k{} and \a{} for each resolution strategy to gauge how effective each was at classification.
\input{chapters/results/clustering}
\input{chapters/results/classifying} | {"hexsha": "3041b97e66a058af94610f4127b6087bcb029447", "size": 639, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "chapters/results/000.tex", "max_stars_repo_name": "jmcgover/thesis", "max_stars_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "chapters/results/000.tex", "max_issues_repo_name": "jmcgover/thesis", "max_issues_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "chapters/results/000.tex", "max_forks_repo_name": "jmcgover/thesis", "max_forks_repo_head_hexsha": "25664684158d00864dbe697276d2691ba84461cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 91.2857142857, "max_line_length": 252, "alphanum_fraction": 0.7589984351, "num_tokens": 152} |
#!/usr/bin/python3
#-*-coding:utf-8-*-
import json
import sys
import functools
import math
import numpy as np
import matplotlib.pyplot as plt
import csv
import os
from tablet import *
from bootstrap import *
from plot import *
#Parse all the files
pairData = {}
files = []
outputDir = os.getcwd()
argv = sys.argv[1:]
i = 0
while i < len(argv):
#Read file names
arg = argv[i]
if not arg.startswith("--"):
files.append(arg)
i+=1
else:
while i < len(argv):
arg = argv[i]
if arg == "--output":
if i < len(argv)-1:
outputDir = argv[i+1]
i+=1
else:
print("Missing directory path value to '--output' parameter")
sys.exit(-1)
else:
print("Unknown parameter {}".format(arg))
sys.exit(-1)
i+=1
#Check length of the command line arguments
if len(files) == 0:
print("Run ./extractCHI20Data.py <jsonPath1> [jsonPathN...] [--output dirOutput] [--show]")
sys.exit(-1)
try:
for fileName in files:
print("Opening {}".format(fileName))
with open(fileName, "r") as f:
try:
tabletData = [TabletData(0, None), TabletData(1, None)]
jsonData = json.load(f) #The JSON data
currentStudyID = -1
currentTrial = -1 #current trial ID
currentTargetPosition = None #current target position for this trial
currentTabletID = -1 #current tablet ID for this trial
currentTrialOffset = 0 #time offset for when this trial started
currentDatasetScaling = np.array([1.0, 1.0, 1.0]) #The current dataset scaling
#Look over all the json object saved by the server
for obj in jsonData["data"]:
name = obj["type"]
offset = obj["timeOffset"]
sender = obj["sender"]
headsetIP = obj["headsetIP"].split(':')
#Track the tablets' ID
if name == "HeadsetBindingInfo" and headsetIP[1] == "Tablet":
if bool(obj["tabletConnected"]):
#Update the tablet information
tabletID = int(obj["tabletID"])
ip = headsetIP[0]
for tablet in tabletData:
if tablet.tabletID == tabletID:
tablet.headsetIP = ip
break
if name == "SetPairID":
pairID = obj["pairID"]
for t in tabletData:
t.pairID = pairID
#Track the trial ID and annotation's target position
elif name == "SendNextTrial" and sender == "Server":
currentTrial = obj["currentTrialID"]
currentTargetPosition = np.array(obj["annotationPos"])
currentTabletID = obj["currentTabletID"]
currentTrialOffset = offset
currentStudyID = obj["currentStudyID"]
elif name == "ScaleDataset": #Take account of the scaling
if obj["datasetID"] == 0 and obj["subDatasetID"] == 0 and sender == "Server" and obj["inPublic"] == 1: #Correct dataset, correct sender, in public space
currentDatasetScaling = np.array(obj["scale"])
#When the study is running (i.e., no training)
if currentTrial != -1 and currentStudyID != -1:
#Track when an annotation started
#This will initialize a new annotation. We track the server sending message.
if name == "StartAnnotation" and sender == "Server":
if obj["datasetID"] == 0 and obj["subDatasetID"] == 0 and obj["inPublic"] == 1: #Only the main dataset counts
pointingID = obj["pointingID"]
for tablet in tabletData:
if tablet.headsetIP == headsetIP[0] and tablet.tabletID == currentTabletID:
tablet.initAnnotation(currentStudyID, currentTrial, pointingID, currentTargetPosition, currentTrialOffset, offset)
break
#Anchor the annotation
#This will finalize the initialized annotation. We track the server sending message.
elif name == "AnchorAnnotation" and sender == "Server":
if obj["datasetID"] == 0 and obj["subDatasetID"] == 0 and obj["inPublic"] == 1: #Only the main dataset counts
for tablet in tabletData:
if tablet.headsetIP == headsetIP[0] and tablet.tabletID == currentTabletID:
anchorPos = np.array(obj["localPos"])
tablet.commitAnnotation(anchorPos, offset, currentDatasetScaling)
break
pairData[fileName] = tabletData
except json.JSONDecodeError as jsonErr:
print("Could not parse the json file. Error : {0}".format(jsonErr))
sys.exit(-1)
except IOError as err:
print("Could not open the file {0}. Error : {1}".format(sys.argv[1], err))
sys.exit(-1)
def getAnnotationsStudy2(pair):
"""Get the list of the annotations for a pair of participants in study 2
@param pair the pair of participant"""
annotations = functools.reduce(lambda x, y : x + y, [y.annotations + z.annotations for y, z in zip(pair[0].study2Data, pair[1].study2Data)], [])
return annotations
def computeStudy2Data(pairData, pointingID):
"""Compute the data for a giving pointing technique.
@param pairData the data of pairs (dict)
@param pointingID the pointingID to look at
@return a tuple (pIDs, accs, annotTCTs, trialTCTs)"""
#Annotations
annots = functools.reduce(lambda x, y: x+y, [getAnnotationsStudy2(pair) for pair in pairData.values()], [])
#Participant IDs
pIDs = [ann.pID for ann in annots if ann.pointingID==pointingID]
#Trial IDs
trialIDs = [ann.trialID for ann in annots if ann.pointingID==pointingID]
#Accuracy in World Space
accs = [ann.worldAccuracy for ann in annots if ann.pointingID==pointingID]
#TCT from the start of the annotation
annotTCTs = [ann.annotTCT for ann in annots if ann.pointingID==pointingID]
#TCT from the start of the trial
trialTCTs = [ann.trialTCT for ann in annots if ann.pointingID==pointingID]
return (np.array(pIDs), np.array(trialIDs), np.array(accs), np.array(annotTCTs), np.array(trialTCTs))
def computeBootstrap(data, pointingID):
"""Compute the 95% Bootstrap CI from a list of annotations
@param data tuple of np.array containing (accs, annotTCTs, trialTCTs)
@param pointingID the pointingID to look at
@return a tuple (acc_avg, acc_std, tct_trial_avg, tct_trial_std, tct_annot_avg, tct_annot_std).
Each value of the tuple contains a list containing two values. The first one containing the annotation TCT and the second one containing the trial TCT"""
accs, annotTCTs, trialTCTs = data
#Accuracy
accBootstrap = bootstrap(accs, 5000)
accAvg, accStd = getMeanAndStd(accBootstrap(0.95))
#TCT
annotTCTBootstrap = bootstrap(annotTCTs, 5000)
trialTCTBootstrap = bootstrap(trialTCTs, 5000)
tctBootstrap = [annotTCTBootstrap(0.95), trialTCTBootstrap(0.95)]
tctAvgs, tctStds = getMeansAndStds(tctBootstrap)
return (accAvg, accStd, #Avg
tctAvgs[1], tctStds[1], #Trial TCT
tctAvgs[0], tctStds[0]) #Annot TCT
def study2Pipeline(pairData):
"""Compute the data for the second part of the study
This will generate a list of PDFs permitting to visualize the dataset
@param pairData the data of pairs of participants"""
#Get the pointingData and the maximum axis for the TCT values
pointingData = {}
maxAxisTCT = 0
maxAxisAcc = 0
pointingITs = [POINTINGID_GOGO, POINTINGID_WIM, POINTINGID_MANUAL]
for p in pointingITs:
data = computeStudy2Data(pairData, p)
csvFilePath = "{}/pointing_{}.csv".format(outputDir, p)
print("Saving {}".format(csvFilePath))
with open(csvFilePath, "w") as csvFile:
writer = csv.writer(csvFile, delimiter=',')
writer.writerow(["pID", "trialID", "acc", "annotTCT", "trialTCT"])
for r in zip(*data):
writer.writerow(r)
cis = computeBootstrap(data[-3:], p)
pointingData[p] = cis
maxAxisTCT = max(maxAxisTCT, cis[2]+cis[3]) #Compute the maximum TCT axis length based on the trial parameter (which is bigger than the annotation parameter)
maxAxisAcc = max(maxAxisAcc, cis[0]+cis[1]) #Compute the maximum Acc axis length
#Print the PDFs
for p in pointingITs:
#Print the time completion task graphs
tctAvgs = [pointingData[p][4], pointingData[p][2]]
tctStds = [pointingData[p][5], pointingData[p][3]]
filePathTCT = "{}/tct_{}.pdf".format(outputDir, p)
filePathAcc = "{}/acc_{}.pdf".format(outputDir, p)
print("Saving {}".format(filePathTCT))
drawCIs(filePathTCT, tctAvgs, tctStds, ["Annotation TCT", "Trial TCT"], "#CCCCCC", maxAxis=maxAxisTCT)
print("Saving {}".format(filePathAcc))
drawCIs(filePathAcc, [pointingData[p][0]], [pointingData[p][1]], ["Accuracy"], "#CCCCCC", maxAxis=maxAxisAcc)
study2Pipeline(pairData)
| {"hexsha": "2a2ee5cd52ff6358747655e5e696d8d7d6e6bd09", "size": 10010, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/extractCHI20Data/extractCHI20Data.py", "max_stars_repo_name": "MickaelSERENO/SciVis_Server", "max_stars_repo_head_hexsha": "35b07c832d053b49e4f3f9f85b5168dddedcaf5e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "scripts/extractCHI20Data/extractCHI20Data.py", "max_issues_repo_name": "MickaelSERENO/SciVis_Server", "max_issues_repo_head_hexsha": "35b07c832d053b49e4f3f9f85b5168dddedcaf5e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/extractCHI20Data/extractCHI20Data.py", "max_forks_repo_name": "MickaelSERENO/SciVis_Server", "max_forks_repo_head_hexsha": "35b07c832d053b49e4f3f9f85b5168dddedcaf5e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.5217391304, "max_line_length": 176, "alphanum_fraction": 0.5712287712, "include": true, "reason": "import numpy", "num_tokens": 2349} |
import Arena
from MCTS import MCTS
from ataxx.AtaxxGame import AtaxxGame
from ataxx.AtaxxPlayers import *
from ataxx.pytorch.NNet import NNetWrapper as NNet
import numpy as np
from utils import *
import argparse
"""
use this script to play any two agents against each other, or play manually with
any agent.
"""
parser = argparse.ArgumentParser()
parser.add_argument('-f', '--model-file', default='./temp/best.pth.tar', metavar='PATH', help='Path to model file. Default is ./temp/best.pth.tar')
parser.add_argument('-m', '--mcts', default=300, type=int, metavar='N', help='Number of MCTS simulation per turn')
parser.add_argument('-p', '--player', default='human', metavar='P', help='AI vs PLAYER. Default P is human', choices=['human', 'random', 'greedy', 'ai'])
parser.add_argument('-g', '--model-file-p2', default='./temp/best.pth.tar', metavar='PATH', help='Path to model file of second ai player.')
# parser.add_argument('')
a = parser.parse_args()
path = a.model_file.split('/')
model_dir = '/'.join(path[:-1])
model_file = path[-1]
path2 = a.model_file_p2.split('/')
model_dir2 = '/'.join(path2[:-1])
model_file2 = path2[-1]
g = AtaxxGame(7)
# all players
rp = RandomPlayer(g).play
gp = GreedyAtaxxPlayer(g).play
hp = HumanAtaxxPlayer(g).play
# nnet players
n1 = NNet(g)
n1.load_checkpoint(model_dir, model_file)
args1 = dotdict({'numMCTSSims': a.mcts, 'cpuct':1.0})
mcts1 = MCTS(g, n1, args1)
n1p = lambda x: np.argmax(mcts1.getActionProb(x, temp=0))
if a.player == 'human':
player2 = hp
elif a.player == 'random':
player2 = rp
elif a.player == 'greedy':
player2 = gp
else:
n2 = NNet(g)
n2.load_checkpoint(model_dir2, model_file2)
args2 = dotdict({'numMCTSSims': a.mcts, 'cpuct': 1.0})
mcts2 = MCTS(g, n2, args2)
n2p = lambda x: np.argmax(mcts2.getActionProb(x, temp=0))
player2 = n2p # Player 2 is neural network if it's cpu vs cpu.
arena = Arena.Arena(n1p, player2, g, display=AtaxxGame.display)
print(arena.playGames(2, verbose=True))
| {"hexsha": "11ab1d191acd1688481e5224ae3d73301c5fee38", "size": 1999, "ext": "py", "lang": "Python", "max_stars_repo_path": "pit.py", "max_stars_repo_name": "2JS/alpha-zero-general", "max_stars_repo_head_hexsha": "d172ee5b20b80fb3a9eacd4a8b8437e570131a7e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pit.py", "max_issues_repo_name": "2JS/alpha-zero-general", "max_issues_repo_head_hexsha": "d172ee5b20b80fb3a9eacd4a8b8437e570131a7e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-09-06T10:20:42.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-16T00:10:41.000Z", "max_forks_repo_path": "pit.py", "max_forks_repo_name": "2JS/alpha-zero-general", "max_forks_repo_head_hexsha": "d172ee5b20b80fb3a9eacd4a8b8437e570131a7e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.3970588235, "max_line_length": 153, "alphanum_fraction": 0.6923461731, "include": true, "reason": "import numpy", "num_tokens": 605} |
from jesse.helpers import get_candle_source, slice_candles, np_shift
import numpy as np
from numba import njit
import talib
from typing import Union
from jesse.helpers import get_config
from collections import namedtuple
'''
https://www.tradingview.com/script/NgLjvBWA-RedK-Compound-Ratio-Moving-Average-CoRa-Wave/#chart-view-comments
'''
def compma(candles: np.ndarray, length: int= 20, ratio: float=2.0,smooth: bool = True,man_smooth:int=1, source_type: str = "close", sequential: bool = False ) -> Union[float, np.ndarray]:
candles = slice_candles(candles, sequential)
source = get_candle_source(candles, source_type=source_type)
cora_raw = fast_comp(source,length,0.01,ratio)
s = np.maximum(np.round(np.sqrt(length)),1) if smooth else man_smooth
cora_wave = pine_wma(cora_raw,s)
if sequential:
return cora_wave
else:
return cora_wave[-1]
@njit
def fast_comp(source,length,start_wt,ratio):
r = np.full_like(source,0)
base = np.full_like(source,0)
res = np.full_like(source,0)
for i in range(source.shape[0]):
r[i] = np.power((length/start_wt),(1/(length - 1)))-1
base[i] = 1 + r[i] * ratio
c_weight = 0.0
numerator = 0.0
denom = 0.0
for j in range(length):
c_weight = start_wt * np.power(base[i-j],(length - j))
numerator = numerator + source[i-j] * c_weight
denom = denom + c_weight
res[i] = numerator/denom
return res
@njit
def pine_wma(source,length):
res = np.full_like(source,length)
for i in range(source.shape[0]):
weight = 0.0
norm = 0.0
sum1 = 0.0
for j in range(length):
weight = (length - j)*length
norm = norm + weight
sum1 = sum1 + source[i-j] * weight
res[i] = sum1/norm
return res
| {"hexsha": "456bcb9dda3e26676ac876dab9ab99d062a70f40", "size": 1871, "ext": "py", "lang": "Python", "max_stars_repo_path": "Indicators/compound_ratio_ma.py", "max_stars_repo_name": "Desil-sketch/Indicators-for-Jesse", "max_stars_repo_head_hexsha": "ffe33a217002ea3034696fe38acfa72611d52b4f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-08T06:34:48.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T06:34:48.000Z", "max_issues_repo_path": "Indicators/compound_ratio_ma.py", "max_issues_repo_name": "Desil-sketch/Indicators-for-Jesse", "max_issues_repo_head_hexsha": "ffe33a217002ea3034696fe38acfa72611d52b4f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Indicators/compound_ratio_ma.py", "max_forks_repo_name": "Desil-sketch/Indicators-for-Jesse", "max_forks_repo_head_hexsha": "ffe33a217002ea3034696fe38acfa72611d52b4f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-11-04T17:40:19.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-04T17:40:19.000Z", "avg_line_length": 33.4107142857, "max_line_length": 191, "alphanum_fraction": 0.6328166756, "include": true, "reason": "import numpy,from numba", "num_tokens": 528} |
module spectral_wave_data_def
use, intrinsic :: iso_fortran_env, only: int64
use kind_values, only: knd => kind_swd_interface, wp => kind_swd_internal
use spectral_wave_data_error, only: swd_error
implicit none
private
! This module provides the abstract base class for spectral_wave_data_X objects.
!
! Written by Jens Bloch Helmers, August, 2. 2019
!
!------------------------------------------------------------------------------
!##############################################################################
!
! B E G I N P U B L I C Q U A N T I T I E S
!
!------------------------------------------------------------------------------
!
public :: spectral_wave_data
!
!------------------------------------------------------------------------------
!
! E N D P U B L I C Q U A N T I T I E S
!
!##############################################################################
type, abstract :: spectral_wave_data
! Common attributes for all shape classes
character(len=30) :: prog ! Name of the program who created this swd file including version.
character(len=20) :: date ! Date and time this swd file was created
character(len=200) :: file ! Name of swd file
integer :: unit ! Unit number associated with swd file
integer :: fmt ! Code to identify format of swd file.
integer :: shp ! Index of actual spectral shape class
integer :: amp ! Index of which spectral amplitudes are available
character(len=:), allocatable :: cid ! Identification text in swd file
integer :: nstrip ! Number of initial time steps removed from original simulation
integer :: nsteps ! Total number of time steps in swd file.
integer :: order ! Order of perturbation (<0 if fully nonlinear) applied in wave generator
integer :: norder ! Expansion order to apply in kinematics for z>0
! <0: apply exp(kj z)
! 0: apply expansion order specified on swd file
! >0: apply expansion order = norder
integer :: ipol ! Index defining the temporal interpolation scheme
real(wp) :: dt ! Constant time step in swd file
real(wp) :: t0 ! Input seed for time (t0>=0)
real(wp) :: x0,y0 ! Input seed for spatial location
real(wp) :: tswd ! Current swd time
real(wp) :: grav ! Acceleration of gravity
real(wp) :: lscale ! Number of length units in wave generator per meter.
real(wp) :: rho ! Density of water
real(wp) :: cbeta ! cos(beta), beta=angle between swd and application x-axis
real(wp) :: sbeta ! sin(beta), beta=angle between swd and application x-axis
real(wp) :: tmax ! Maximum allowed simulation time (user system)
integer :: size_complex ! On most systems size_complex=8 for c_float based numbers
integer :: size_step ! Fortran storage size per time step
integer(int64) :: ipos0 ! File postion where temporal functions starts
logical :: eof ! End-of-file detected for SWD file
logical :: dc_bias ! True: apply zero frequency amplitudes from SWD file.
! False: Suppress contribution from zero frequency amplitudes (Default)
type(swd_error) :: error ! Abort free error handler
contains
procedure(update_time), deferred :: update_time ! Obtain spectral data for current time
procedure(phi), deferred :: phi ! Calculate potential at location for current time
procedure(stream), deferred :: stream ! Calculate stream function
procedure(phi_t), deferred :: phi_t ! Calculate d(potential)/dt (Euler) at location for current time
procedure(grad_phi), deferred :: grad_phi ! Calculate particle velocity at location for current time
procedure(grad_phi_2nd),deferred :: grad_phi_2nd ! Calculate second order spatial gradients of potential
procedure(acc_euler), deferred :: acc_euler ! Calculate Euler acceleration (grad(phi_t)) at location for current time
procedure(acc_particle),deferred :: acc_particle ! Calculate particle acceleration at location for current time
procedure(elev), deferred :: elev ! Calculate surface elevation at location for current time
procedure(elev_t), deferred :: elev_t ! Calculate d(surface elevation)/dt (Euler) at location for current time
procedure(grad_elev), deferred :: grad_elev ! Calculate gradient of surface elevation at location for current time
procedure(grad_elev_2nd),deferred:: grad_elev_2nd ! Calculate second order spatial gradients of elevation
procedure(pressure), deferred :: pressure ! Fully nonlinear Bernoulli pressure
procedure(bathymetry), deferred :: bathymetry ! Return local depth at application position (x, y)
procedure(bathymetry_nvec),deferred :: bathymetry_nvec ! Unit normal vector of sea floor into the ocean at (x,y)
procedure(convergence), deferred :: convergence ! For a specific (t,x,y,z) return a csv-file on how particle velocity, elevation
! and pressure converge as a function of number of spectral components
procedure(strip), deferred :: strip ! Create a new SWD file based on a time window of current SWD file.
procedure(get_int), deferred :: get_int ! Extract a specified int parameter
procedure(get_logical), deferred :: get_logical ! Extract a specified logical parameter
procedure(get_real), deferred :: get_real ! Extract a specified float parameter
procedure(get_chr), deferred :: get_chr ! Extract a specified char parameter
procedure(close), deferred :: close ! Manual destructor
procedure :: error_raised ! Return .true. if error has been signaled
procedure :: error_id ! Return error id
procedure :: error_msg ! Return error message
procedure :: error_clear ! Clear error signal (id=0)
end type spectral_wave_data
abstract interface
!
subroutine update_time(self, time)
import
class(spectral_wave_data), intent(inout) :: self ! Update data in memory (if needed)
real(knd), intent(in) :: time ! Current time in simulation program
end subroutine update_time
function phi(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res ! Potential at (x,y,z)
end function phi
function stream(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res ! Stream function value at (x,y,z)
end function stream
function phi_t(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res ! Euler time derivative of potential at (x,y,z)
end function phi_t
function grad_phi(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res(3) ! Particle velocity at (x,y,z)
end function grad_phi
function grad_phi_2nd(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res(6) ! Second order gradients of potential at (x,y,z)
! res(1) = d^2(potential) / dx^2
! res(2) = d^2(potential) / dx dy
! res(3) = d^2(potential) / dx dz
! res(4) = d^2(potential) / dy^2
! res(5) = d^2(potential) / dy dz
! res(6) = d^2(potential) / dz^2
end function grad_phi_2nd
function acc_euler(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res(3) ! Euler acceleration at (x,y,z)
end function acc_euler
function acc_particle(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res(3) ! Particle acceleration at (x,y,z)
end function acc_particle
function elev(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res ! Surface elevation at (x,y)
end function elev
function elev_t(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res ! d/dt of surface elevation at (x,y)
end function elev_t
function grad_elev(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res(3) ! x, y and z gradients of surface elevation at (x,y)
end function grad_elev
function grad_elev_2nd(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res(3) ! Second order gradients of surface elevation
! res(1) = d^2(elevation) / dx^2
! res(2) = d^2(elevation) / dx dy
! res(3) = d^2(elevation) / dy^2
end function grad_elev_2nd
function pressure(self, x, y, z) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
real(knd) :: res ! Nonlinear pressure
end function pressure
function bathymetry(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res ! Local depth at (x,y)
end function bathymetry
function bathymetry_nvec(self, x, y) result(res)
import
class(spectral_wave_data), intent(in) :: self ! Actual class
real(knd), intent(in) :: x,y ! Position application program
real(knd) :: res(3) ! Unit normal vector of sea floor into the ocean at (x,y)
end function bathymetry_nvec
subroutine convergence(self, x, y, z, csv)
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
real(knd), intent(in) :: x,y,z ! Position application program
character(len=*), intent(in) :: csv ! Name of output csv-file
end subroutine convergence
subroutine strip(self, tmin, tmax, file_swd)
! Create a new swd file containing the spectral information limited
! to the application time window: tmin <= t <= tmax.
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
real(knd), intent(in) :: tmin, tmax ! Time window application program
character(len=*), intent(in) :: file_swd ! Name of new swd file
end subroutine strip
function get_int(self, name) result(res)
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
character(len=*), intent(in) :: name ! Name of int parameter
integer :: res ! Value of name parameter
end function get_int
function get_logical(self, name) result(res)
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
character(len=*), intent(in) :: name ! Name of logical parameter
logical :: res ! Value of name parameter
end function get_logical
function get_real(self, name) result(res)
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
character(len=*), intent(in) :: name ! Name of real parameter
real(knd) :: res ! Value of name parameter
end function get_real
function get_chr(self, name) result(res)
import
class(spectral_wave_data), intent(inout) :: self ! Actual class
character(len=*), intent(in) :: name ! Name of char parameter
character(len=:), allocatable :: res ! Value of name parameter
end function get_chr
subroutine close(self)
import
class(spectral_wave_data) :: self ! Object to destruct
end subroutine close
end interface
contains
!==============================================================================
function error_raised(self) result(res)
class(spectral_wave_data), intent(in) :: self ! Error handler
logical :: res ! .true. if error has been signaled
!
res = self % error % raised()
!
end function error_raised
!==============================================================================
function error_id(self) result(res)
class(spectral_wave_data), intent(in) :: self ! Error handler
integer :: res ! Return error code
!
res = self % error % id
!
end function error_id
!==============================================================================
function error_msg(self) result(res)
class(spectral_wave_data), intent(in) :: self ! Error handler
character(len=len_trim(self%error%msg)) :: res ! Return error code
!
res = self % error % msg
!
end function error_msg
!==============================================================================
subroutine error_clear(self)
class(spectral_wave_data), intent(inout) :: self ! Error handler
!
call self % error % clear()
!
end subroutine error_clear
!==============================================================================
end module spectral_wave_data_def
| {"hexsha": "c1ee465f59877733f89527b3d0a227d378518613", "size": 15864, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/api/fortran/spectral_wave_data.f90", "max_stars_repo_name": "TormodLandet/spectral_wave_data", "max_stars_repo_head_hexsha": "c43710e769c3d7d3c4f832ab74e456706b361493", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2020-03-01T19:49:39.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-29T10:21:56.000Z", "max_issues_repo_path": "src/api/fortran/spectral_wave_data.f90", "max_issues_repo_name": "TormodLandet/spectral_wave_data", "max_issues_repo_head_hexsha": "c43710e769c3d7d3c4f832ab74e456706b361493", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/api/fortran/spectral_wave_data.f90", "max_forks_repo_name": "TormodLandet/spectral_wave_data", "max_forks_repo_head_hexsha": "c43710e769c3d7d3c4f832ab74e456706b361493", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-09-01T12:23:30.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-24T17:50:40.000Z", "avg_line_length": 51.674267101, "max_line_length": 138, "alphanum_fraction": 0.5481593545, "num_tokens": 3387} |
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import numpy as np
from aesara import tensor as at
from aesara.graph.basic import Variable
from aesara.tensor.var import TensorVariable
from arviz import InferenceData
import pymc as pm
from pymc.blocking import DictToArrayBijection
from pymc.distributions.dist_math import rho2sigma
from pymc.variational import opvi
from pymc.variational.opvi import (
Approximation,
Group,
NotImplementedInference,
node_property,
)
__all__ = ["MeanField", "FullRank", "Empirical", "sample_approx"]
@Group.register
class MeanFieldGroup(Group):
R"""Mean Field approximation to the posterior where spherical Gaussian family
is fitted to minimize KL divergence from True posterior. It is assumed
that latent space variables are uncorrelated that is the main drawback
of the method
"""
__param_spec__ = dict(mu=("d",), rho=("d",))
short_name = "mean_field"
alias_names = frozenset(["mf"])
@node_property
def mean(self):
return self.params_dict["mu"]
@node_property
def rho(self):
return self.params_dict["rho"]
@node_property
def cov(self):
var = rho2sigma(self.rho) ** 2
return at.diag(var)
@node_property
def std(self):
return rho2sigma(self.rho)
@aesara.config.change_flags(compute_test_value="off")
def __init_group__(self, group):
super().__init_group__(group)
if not self._check_user_params():
self.shared_params = self.create_shared_params(self._kwargs.get("start", None))
self._finalize_init()
def create_shared_params(self, start=None):
start = self._prepare_start(start)
rho = np.zeros((self.ddim,))
return {
"mu": aesara.shared(pm.floatX(start), "mu"),
"rho": aesara.shared(pm.floatX(rho), "rho"),
}
@node_property
def symbolic_random(self):
initial = self.symbolic_initial
sigma = self.std
mu = self.mean
return sigma * initial + mu
@node_property
def symbolic_logq_not_scaled(self):
z0 = self.symbolic_initial
std = rho2sigma(self.rho)
logdet = at.log(std)
quaddist = -0.5 * z0**2 - at.log((2 * np.pi) ** 0.5)
logq = quaddist - logdet
return logq.sum(range(1, logq.ndim))
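def _mean_field_logq_check():
    # Illustrative numeric sketch (hypothetical helper, not part of the API; requires scipy):
    # after the reparametrization z = mu + sigma * z0 used by MeanFieldGroup, the quantity
    # sum(-0.5 * z0**2 - log(sqrt(2*pi)) - log(sigma)) from symbolic_logq_not_scaled equals
    # the sum of independent Normal(mu, sigma) log-densities evaluated at z.
    from scipy import stats
    rng = np.random.default_rng(0)
    mu = rng.normal(size=3)
    sigma = np.exp(rng.normal(size=3))  # any positive scales stand in for rho2sigma(rho)
    z0 = rng.normal(size=3)
    z = mu + sigma * z0
    direct = stats.norm(mu, sigma).logpdf(z).sum()
    reparam = np.sum(-0.5 * z0**2 - np.log(np.sqrt(2 * np.pi)) - np.log(sigma))
    assert np.isclose(direct, reparam)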
@Group.register
class FullRankGroup(Group):
"""Full Rank approximation to the posterior where Multivariate Gaussian family
is fitted to minimize KL divergence from True posterior. In contrast to
MeanField approach correlations between variables are taken in account. The
main drawback of the method is computational cost.
"""
__param_spec__ = dict(mu=("d",), L_tril=("int(d * (d + 1) / 2)",))
short_name = "full_rank"
alias_names = frozenset(["fr"])
@aesara.config.change_flags(compute_test_value="off")
def __init_group__(self, group):
super().__init_group__(group)
if not self._check_user_params():
self.shared_params = self.create_shared_params(self._kwargs.get("start", None))
self._finalize_init()
def create_shared_params(self, start=None):
start = self._prepare_start(start)
n = self.ddim
L_tril = np.eye(n)[np.tril_indices(n)].astype(aesara.config.floatX)
return {"mu": aesara.shared(start, "mu"), "L_tril": aesara.shared(L_tril, "L_tril")}
@node_property
def L(self):
L = at.zeros((self.ddim, self.ddim))
L = at.set_subtensor(L[self.tril_indices], self.params_dict["L_tril"])
Ld = L[..., np.arange(self.ddim), np.arange(self.ddim)]
L = at.set_subtensor(Ld, rho2sigma(Ld))
return L
@node_property
def mean(self):
return self.params_dict["mu"]
@node_property
def cov(self):
L = self.L
return L.dot(L.T)
@node_property
def std(self):
return at.sqrt(at.diag(self.cov))
@property
def num_tril_entries(self):
n = self.ddim
return int(n * (n + 1) / 2)
@property
def tril_indices(self):
return np.tril_indices(self.ddim)
@node_property
def symbolic_logq_not_scaled(self):
z0 = self.symbolic_initial
diag = at.diagonal(self.L, 0, self.L.ndim - 2, self.L.ndim - 1)
logdet = at.log(diag)
quaddist = -0.5 * z0**2 - at.log((2 * np.pi) ** 0.5)
logq = quaddist - logdet
return logq.sum(range(1, logq.ndim))
@node_property
def symbolic_random(self):
initial = self.symbolic_initial
L = self.L
mu = self.mean
return initial.dot(L.T) + mu
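def _full_rank_logq_check():
    # Illustrative numeric sketch (hypothetical helper, not part of the API; requires scipy):
    # for a lower-triangular L with positive diagonal and z = L @ z0 + mu, the FullRankGroup-style
    # log-density sum(-0.5 * z0**2 - log(sqrt(2*pi))) - sum(log(diag(L))) matches the
    # multivariate normal log-pdf with covariance L @ L.T.
    from scipy import stats
    rng = np.random.default_rng(1)
    d = 3
    A = rng.normal(size=(d, d))
    L = np.linalg.cholesky(A @ A.T + d * np.eye(d))
    mu = rng.normal(size=d)
    z0 = rng.normal(size=d)
    z = L @ z0 + mu
    direct = stats.multivariate_normal(mean=mu, cov=L @ L.T).logpdf(z)
    reparam = np.sum(-0.5 * z0**2 - np.log(np.sqrt(2 * np.pi))) - np.sum(np.log(np.diag(L)))
    assert np.isclose(direct, reparam)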
@Group.register
class EmpiricalGroup(Group):
"""Builds Approximation instance from a given trace,
it has the same interface as variational approximation
"""
has_logq = False
__param_spec__ = dict(histogram=("s", "d"))
short_name = "empirical"
@aesara.config.change_flags(compute_test_value="off")
def __init_group__(self, group):
super().__init_group__(group)
self._check_trace()
if not self._check_user_params(spec_kw=dict(s=-1)):
self.shared_params = self.create_shared_params(
trace=self._kwargs.get("trace", None),
size=self._kwargs.get("size", None),
jitter=self._kwargs.get("jitter", 1),
start=self._kwargs.get("start", None),
)
self._finalize_init()
def create_shared_params(self, trace=None, size=None, jitter=1, start=None):
if trace is None:
if size is None:
raise opvi.ParametrizationError("Need `trace` or `size` to initialize")
else:
start = self._prepare_start(start)
# Initialize particles
histogram = np.tile(start, (size, 1))
histogram += pm.floatX(np.random.normal(0, jitter, histogram.shape))
else:
histogram = np.empty((len(trace) * len(trace.chains), self.ddim))
i = 0
for t in trace.chains:
for j in range(len(trace)):
histogram[i] = DictToArrayBijection.map(trace.point(j, t)).data
i += 1
return dict(histogram=aesara.shared(pm.floatX(histogram), "histogram"))
def _check_trace(self):
trace = self._kwargs.get("trace", None)
if isinstance(trace, InferenceData):
raise NotImplementedError(
"The `Empirical` approximation does not yet support `InferenceData` inputs."
" Pass `pm.sample(return_inferencedata=False)` to get a `MultiTrace` to use with `Empirical`."
" Please help us to refactor: https://github.com/pymc-devs/pymc/issues/5884"
)
elif trace is not None and not all(
[self.model.rvs_to_values[var].name in trace.varnames for var in self.group]
):
raise ValueError("trace has not all free RVs in the group")
def randidx(self, size=None):
if size is None:
size = (1,)
elif isinstance(size, TensorVariable):
if size.ndim < 1:
size = size[None]
elif size.ndim > 1:
raise ValueError("size ndim should be no more than 1d")
else:
pass
else:
size = tuple(np.atleast_1d(size))
return self._rng.uniform(
size=size, low=pm.floatX(0), high=pm.floatX(self.histogram.shape[0]) - pm.floatX(1e-16)
).astype("int32")
def _new_initial(self, size, deterministic, more_replacements=None):
aesara_condition_is_here = isinstance(deterministic, Variable)
if size is None:
size = 1
size = at.as_tensor(size)
if aesara_condition_is_here:
return at.switch(
deterministic,
at.repeat(self.mean.reshape((1, -1)), size, -1),
self.histogram[self.randidx(size)],
)
else:
if deterministic:
raise NotImplementedInference(
"Deterministic sampling from a Histogram is broken in v4"
)
return at.repeat(self.mean.reshape((1, -1)), size, -1)
else:
return self.histogram[self.randidx(size)]
@property
def symbolic_random(self):
return self.symbolic_initial
@property
def histogram(self):
return self.params_dict["histogram"]
@node_property
def mean(self):
return self.histogram.mean(0)
@node_property
def cov(self):
x = self.histogram - self.mean
return x.T.dot(x) / pm.floatX(self.histogram.shape[0])
@node_property
def std(self):
return at.sqrt(at.diag(self.cov))
def __str__(self):
if isinstance(self.histogram, aesara.compile.SharedVariable):
shp = ", ".join(map(str, self.histogram.shape.eval()))
else:
shp = "None, " + str(self.ddim)
return f"{self.__class__.__name__}[{shp}]"
def sample_approx(approx, draws=100, include_transformed=True):
"""Draw samples from variational posterior.
Parameters
----------
approx: :class:`Approximation`
Approximation to sample from
draws: `int`
Number of random samples.
include_transformed: `bool`
If True, transformed variables are also sampled. Default is True.
Returns
-------
    trace: :class:`pymc.backends.base.MultiTrace`
Samples drawn from variational posterior.
"""
return approx.sample(draws=draws, include_transformed=include_transformed)
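def _sample_approx_sketch():
    # Minimal usage sketch (the toy model below is hypothetical and only
    # illustrates the call sequence; it is not part of the library API):
    # fit an approximation, then draw from it with sample_approx.
    with pm.Model():
        pm.Normal("x", 0.0, 1.0)
        approx = pm.fit(n=1000, method="advi")
    return sample_approx(approx, draws=100, include_transformed=True)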
# single group shortcuts exported to user
class SingleGroupApproximation(Approximation):
"""Base class for Single Group Approximation"""
_group_class = None
def __init__(self, *args, **kwargs):
local_rv = kwargs.get("local_rv")
groups = [self._group_class(None, *args, **kwargs)]
if local_rv is not None:
groups.extend(
[
Group([v], params=p, local=True, model=kwargs.get("model"))
for v, p in local_rv.items()
]
)
super().__init__(groups, model=kwargs.get("model"))
def __getattr__(self, item):
return getattr(self.groups[0], item)
def __dir__(self):
d = set(super().__dir__())
d.update(self.groups[0].__dir__())
return list(sorted(d))
class MeanField(SingleGroupApproximation):
__doc__ = """**Single Group Mean Field Approximation**
""" + str(
MeanFieldGroup.__doc__
)
_group_class = MeanFieldGroup
class FullRank(SingleGroupApproximation):
__doc__ = """**Single Group Full Rank Approximation**
""" + str(
FullRankGroup.__doc__
)
_group_class = FullRankGroup
class Empirical(SingleGroupApproximation):
__doc__ = """**Single Group Full Rank Approximation**
""" + str(
EmpiricalGroup.__doc__
)
_group_class = EmpiricalGroup
def __init__(self, trace=None, size=None, **kwargs):
if kwargs.get("local_rv", None) is not None:
raise opvi.LocalGroupError("Empirical approximation does not support local variables")
super().__init__(trace=trace, size=size, **kwargs)
def evaluate_over_trace(self, node):
R"""
        This allows one to statically evaluate any symbolic expression over the trace.
Parameters
----------
node: Aesara Variables (or Aesara expressions)
Returns
-------
evaluated node(s) over the posterior trace contained in the empirical approximation
"""
node = self.to_flat_input(node)
def sample(post, node):
return aesara.clone_replace(node, {self.input: post})
nodes, _ = aesara.scan(sample, self.histogram, non_sequences=[node])
return nodes
| {"hexsha": "90f25c3631a775a0781b82ba5c167c5e68482183", "size": 12502, "ext": "py", "lang": "Python", "max_stars_repo_path": "pymc/variational/approximations.py", "max_stars_repo_name": "cluhmann/pymc", "max_stars_repo_head_hexsha": "562be3781c9d37d3300c4efd4cf6598e5739c32d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2022-03-01T16:45:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T16:45:19.000Z", "max_issues_repo_path": "pymc/variational/approximations.py", "max_issues_repo_name": "cluhmann/pymc", "max_issues_repo_head_hexsha": "562be3781c9d37d3300c4efd4cf6598e5739c32d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pymc/variational/approximations.py", "max_forks_repo_name": "cluhmann/pymc", "max_forks_repo_head_hexsha": "562be3781c9d37d3300c4efd4cf6598e5739c32d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-10T21:43:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-10T21:43:24.000Z", "avg_line_length": 32.3886010363, "max_line_length": 110, "alphanum_fraction": 0.6196608543, "include": true, "reason": "import numpy", "num_tokens": 2946} |
[STATEMENT]
lemma complex_roots_complex_prod [simp]:
assumes "f \<noteq> 0" "g \<noteq> 0"
shows "mset (complex_roots_complex (f * g))
= mset (complex_roots_complex f) + mset (complex_roots_complex g)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
proof -
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
let ?p = "f * g"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
let "?lc v" = "(lead_coeff (v:: complex poly))"
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
have nonzero_prod:"?lc ?p \<noteq> 0"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. lead_coeff (f * g) \<noteq> 0
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
f \<noteq> 0
g \<noteq> 0
goal (1 subgoal):
1. lead_coeff (f * g) \<noteq> 0
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
lead_coeff (f * g) \<noteq> 0
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
from reconstruct_prod[of "?lc f" "complex_roots_complex f" "?lc g" "complex_roots_complex g"]
[PROOF STATE]
proof (chain)
picking this:
reconstruct_poly (lead_coeff f) (complex_roots_complex f) * reconstruct_poly (lead_coeff g) (complex_roots_complex g) = reconstruct_poly (lead_coeff f * lead_coeff g) (complex_roots_complex f @ complex_roots_complex g)
[PROOF STEP]
have "reconstruct_poly (?lc ?p) (complex_roots_complex ?p)
= reconstruct_poly (?lc ?p) (complex_roots_complex f @ complex_roots_complex g)"
[PROOF STATE]
proof (prove)
using this:
reconstruct_poly (lead_coeff f) (complex_roots_complex f) * reconstruct_poly (lead_coeff g) (complex_roots_complex g) = reconstruct_poly (lead_coeff f * lead_coeff g) (complex_roots_complex f @ complex_roots_complex g)
goal (1 subgoal):
1. reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex (f * g)) = reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex f @ complex_roots_complex g)
[PROOF STEP]
unfolding lead_coeff_mult[symmetric] reconstruct_is_original_poly
[PROOF STATE]
proof (prove)
using this:
f * g = reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex f @ complex_roots_complex g)
goal (1 subgoal):
1. f * g = reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex f @ complex_roots_complex g)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex (f * g)) = reconstruct_poly (lead_coeff (f * g)) (complex_roots_complex f @ complex_roots_complex g)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
from reconstruct_poly_defines_mset_of_argument[OF nonzero_prod this]
[PROOF STATE]
proof (chain)
picking this:
mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f @ complex_roots_complex g)
[PROOF STEP]
show ?thesis
[PROOF STATE]
proof (prove)
using this:
mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f @ complex_roots_complex g)
goal (1 subgoal):
1. mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
[PROOF STEP]
by simp
[PROOF STATE]
proof (state)
this:
mset (complex_roots_complex (f * g)) = mset (complex_roots_complex f) + mset (complex_roots_complex g)
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 1544, "file": "Berlekamp_Zassenhaus_Mahler_Measure", "length": 14} |
/*
* Copyright (c) 2019, 2020, 2021 SiKol Ltd.
*
* Boost Software License - Version 1.0 - August 17th, 2003
*
* Permission is hereby granted, free of charge, to any person or organization
* obtaining a copy of the software and accompanying documentation covered by
* this license (the "Software") to use, reproduce, display, distribute,
* execute, and transmit the Software, and to prepare derivative works of the
* Software, and to permit third-parties to whom the Software is furnished to
* do so, all subject to the following:
*
* The copyright notices in the Software and this entire statement, including
* the above license grant, this restriction and the following disclaimer,
* must be included in all copies of the Software, in whole or in part, and
* all derivative works of the Software, unless such copies or derivative
* works are solely in the form of machine-executable object code generated by
* a source language processor.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
* SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
* FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#ifndef SK_CONFIG_PARSER_VARIANT_HXX_INCLUDED
#define SK_CONFIG_PARSER_VARIANT_HXX_INCLUDED
#include <variant>
#include <boost/spirit/home/x3/core/parser.hpp>
#include <boost/spirit/home/x3/operator/alternative.hpp>
#include <sk/config/parser_for.hxx>
namespace sk::config {
namespace detail {
template <typename... Parsers>
struct variant_parser
: boost::spirit::x3::parser<variant_parser<Parsers...>> {
typedef std::variant<typename Parsers::attribute_type...>
attribute_type;
static bool const has_attribute = true;
template <typename Iterator, typename Context, typename Attribute>
bool parse(Iterator &first, Iterator const &last,
Context const &context, boost::spirit::x3::unused_type,
Attribute &attr) const {
namespace x3 = boost::spirit::x3;
static auto const parser = (... | Parsers());
return parser.parse(first, last, context, x3::unused, attr);
}
};
template <typename... Ts>
void propagate_value(auto & /*ctx*/, std::variant<Ts...> &to,
std::variant<Ts...> &from) {
to = std::move(from);
}
} // namespace detail
template <typename... Ts> struct parser_for<std::variant<Ts...>> {
using parser_type =
detail::variant_parser<typename parser_for<Ts>::parser_type...>;
using rule_type = std::variant<Ts...>;
static constexpr char const name[] = "a value";
};
} // namespace sk::config
#endif // SK_CONFIG_PARSER_VARIANT_HXX_INCLUDED
| {"hexsha": "785879ac92ff2d169d11bed9ba1c40c02856a2d4", "size": 3138, "ext": "hxx", "lang": "C++", "max_stars_repo_path": "include/sk/config/parser/variant.hxx", "max_stars_repo_name": "sikol/sk-config", "max_stars_repo_head_hexsha": "ada0c72ac5703763b1f1d9aadc2cc3850bf11acc", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "include/sk/config/parser/variant.hxx", "max_issues_repo_name": "sikol/sk-config", "max_issues_repo_head_hexsha": "ada0c72ac5703763b1f1d9aadc2cc3850bf11acc", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/sk/config/parser/variant.hxx", "max_forks_repo_name": "sikol/sk-config", "max_forks_repo_head_hexsha": "ada0c72ac5703763b1f1d9aadc2cc3850bf11acc", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7215189873, "max_line_length": 78, "alphanum_fraction": 0.6743148502, "num_tokens": 671} |
[STATEMENT]
lemma invariance_of_domain_homeomorphism:
fixes f :: "'a::euclidean_space \<Rightarrow> 'b::euclidean_space"
assumes "open S" "continuous_on S f" "DIM('b) \<le> DIM('a)" "inj_on f S"
obtains g where "homeomorphism S (f ` S) f g"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<And>g. homeomorphism S (f ` S) f g \<Longrightarrow> thesis) \<Longrightarrow> thesis
[PROOF STEP]
proof
[PROOF STATE]
proof (state)
goal (1 subgoal):
1. (\<And>g. homeomorphism S (f ` S) f g \<Longrightarrow> thesis) \<Longrightarrow> homeomorphism S (f ` S) f ?g2
[PROOF STEP]
show "homeomorphism S (f ` S) f (inv_into S f)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. homeomorphism S (f ` S) f (inv_into S f)
[PROOF STEP]
by (simp add: assms continuous_on_inverse_open homeomorphism_def)
[PROOF STATE]
proof (state)
this:
homeomorphism S (f ` S) f (inv_into S f)
goal:
No subgoals!
[PROOF STEP]
qed | {"llama_tokens": 363, "file": null, "length": 4} |
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def plot_ellipsoid(m, s2, *, r=2, alpha=None, beta=None,
plot_axes=True, line_color='r', line_width=2,
plot_ellip=True, ellip_color=(.8, .8, .8), ellip_alpha = 0.5,
n_points=1000, point_color='b'):
"""For details, see here.
Parameters
----------
m : array, shape (3,)
s2 : array, shape (3, 3)
r : scalar, optional
alpha : array, shape (n,)
beta : array, shape (n,)
plot_axes : boolean, optional
line_color : char or tuple, optional
line_width : scalar, optional
plot_ellip : boolean, optional
ellip_color : char or tuple, optional
ellip_alpha : scalar, optional
n_points : scalar, optional
point_color : char or tuple, optional
Returns
-------
f : matplotlib figure handle
ax : matplotlib axis handle
"""
lambda2, e = np.linalg.eigh(s2)
s = e * np.sqrt(lambda2)
plt.style.use('arpm')
f, ax = plt.subplots(1, 1, subplot_kw={'projection':'3d'},
figsize=(14, 10))
if n_points > 0:
points = np.random.multivariate_normal(m, s2, n_points)
ax.plot(points[:, 0], points[:, 1], points[:, 2],
'.', color=point_color)
if plot_axes is True:
x_axes_ = np.array([[0, r], [0, 0], [0, 0]])
y_axes_ = np.array([[0, 0], [0, r], [0, 0]])
z_axes_ = np.array([[0, 0], [0, 0], [0, r]])
x_axes = s[0, 0] * x_axes_ + s[0, 1] * y_axes_ + s[0, 2] * z_axes_
y_axes = s[1, 0] * x_axes_ + s[1, 1] * y_axes_ + s[1, 2] * z_axes_
z_axes = s[2, 0] * x_axes_ + s[2, 1] * y_axes_ + s[2, 2] * z_axes_
ax.plot(x_axes[0, :], y_axes[0, :], z_axes[0, :],
color=line_color, lw=line_width)
ax.plot(x_axes[1, :], y_axes[1, :], z_axes[1, :],
color=line_color, lw=line_width)
ax.plot(x_axes[2, :], y_axes[2, :], z_axes[2, :],
color=line_color, lw=line_width)
if plot_ellip is True:
if alpha is None:
alpha = np.linspace(0, 2*np.pi, 50)
if beta is None:
beta = np.linspace(0, np.pi, 50)
# Cartesian coordinates that correspond to the spherical angles
x_ball = r * np.outer(np.cos(alpha), np.sin(beta))
y_ball = r * np.outer(np.sin(alpha), np.sin(beta))
z_ball = r * np.outer(np.ones_like(alpha), np.cos(beta))
x_ellip = s[0, 0] * x_ball + s[0, 1] * y_ball + s[0, 2] * z_ball
y_ellip = s[1, 0] * x_ball + s[1, 1] * y_ball + s[1, 2] * z_ball
z_ellip = s[2, 0] * x_ball + s[2, 1] * y_ball + s[2, 2] * z_ball
ax.plot_surface(x_ellip, y_ellip, z_ellip, color=ellip_color, alpha=ellip_alpha)
return f, ax
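def _plot_ellipsoid_sketch():
    # Usage sketch with hypothetical inputs (requires the 'arpm' matplotlib style
    # to be registered, since plot_ellipsoid calls plt.style.use('arpm')):
    # draw the r=2 ellipsoid of a toy 3-d covariance together with 500 sampled points.
    m = np.zeros(3)
    s2 = np.array([[2.0, 0.5, 0.0],
                   [0.5, 1.0, 0.3],
                   [0.0, 0.3, 0.5]])
    return plot_ellipsoid(m, s2, r=2, n_points=500)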
| {"hexsha": "6a58d417f6ed1e86cdef4dee07a8fc11d1b3ac2e", "size": 2900, "ext": "py", "lang": "Python", "max_stars_repo_path": "arpym/tools/plot_ellipsoid.py", "max_stars_repo_name": "dpopadic/arpmRes", "max_stars_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-04-10T13:24:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T08:20:42.000Z", "max_issues_repo_path": "arpym/tools/plot_ellipsoid.py", "max_issues_repo_name": "dpopadic/arpmRes", "max_issues_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "arpym/tools/plot_ellipsoid.py", "max_forks_repo_name": "dpopadic/arpmRes", "max_forks_repo_head_hexsha": "ddcc4de713b46e3e9dcb77cc08c502ce4df54f76", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-08-13T22:02:17.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T17:49:12.000Z", "avg_line_length": 34.9397590361, "max_line_length": 88, "alphanum_fraction": 0.5365517241, "include": true, "reason": "import numpy", "num_tokens": 927} |
module ESValues
using GLM
using StatsBase
# package code goes here
include("iterators.jl")
export esvalues, ESValuesEstimator
typealias MaskType Float64
type ESValuesEstimator{T}
x
f::Function
X
link
featureGroups::Vector{Vector{Int64}}
weights
nsamples::Int64
varyingInds::Vector{Int64}
varyingFeatureGroups::Vector{Vector{Int64}}
data::Matrix{T}
maskMatrix::Matrix{MaskType}
kernelWeights::Vector{MaskType}
y::Vector{Float64}
ey::Vector{Float64}
lastMask::Vector{Float64}
P::Int64
N::Int64
M::Int64
nsamplesAdded::Int64
nsamplesRun::Int64
fx::Float64
fnull::Float64
end
"Designed to determine the ES values (importance) of each feature for f(x)."
function esvalues(x, f::Function, X, link=identity; featureGroups=nothing, weights=nothing, nsamples=0)
esvalues(ESValuesEstimator(f, X, link; featureGroups=featureGroups, weights=weights, nsamples=nsamples), x)
end
function esvalues(e::ESValuesEstimator, x)
@assert length(x) == e.P "Provided 'x' length must match the data matrix features count ($(length(x)) != $(e.P))!"
e.x = x
# find the feature groups we will test. If a feature does not change from its
# current value then we know it doesn't impact the model
e.varyingInds = varying_groups(e.x, e.X, e.featureGroups)
e.varyingFeatureGroups = e.featureGroups[e.varyingInds]
e.M = length(e.varyingFeatureGroups)
# find f(x) and E_x[f(x)]
e.fx = e.f(x)[1]
e.fnull = sum(vec(e.f(e.X)) .* e.weights)
    # if no features vary then no feature has an effect
if e.M == 0
return e.fx,zeros(length(e.featureGroups)),zeros(length(e.featureGroups))
# if only one feature varies then it has all the effect
elseif e.M == 1
fx = mean(e.f(x))
fnull = sum(vec(e.f(e.X)) .* e.weights)
φ = zeros(length(e.featureGroups))
φ[e.varyingInds[1]] = e.link(e.fx) - e.link(e.fnull)
return e.fnull,φ,zeros(length(e.featureGroups))
end
# pick a reasonable number of samples if the user didn't specify how many they wanted
if e.nsamples == 0
e.nsamples = 2*e.M+1000
end
if e.M <= 30 && e.nsamples > 2^e.M-2
e.nsamples = 2^e.M-2
end
@assert e.nsamples >= min(2*e.M, 2^e.M-2) "'nsamples' must be at least 2 times the number of varying feature groups!"
# add the singleton samples
allocate!(e)
for (m,w) in take(drop(eskernelsubsets(collect(1:e.M), ones(e.M)), 2), 2*e.M)
addsample!(e, x, m, w)
end
run!(e)
    # if there might be more samples then enumerate them
if length(e.y) >= 2*e.M
# estimate the variance of each ES value estimate
variances = zeros(e.M)
for i in 1:2:2*e.M
variances[div(i+1,2)] = var([e.y[i] - e.fnull, e.fx - e.y[i+1]])
end
# now add the rest of the samples giving priority to ES values with high estimated variance
for (m,w) in take(drop(eskernelsubsets(collect(1:e.M), variances), 2*e.M+2), e.nsamples-(2*e.M))
addsample!(e, x, m, w)
end
run!(e)
end
# solve then expand the ES values vector to contain the non-varying features as well
vφ,vφVar = solve!(e)
φ = zeros(length(e.featureGroups))
φ[e.varyingInds] = vφ
φVar = zeros(length(e.featureGroups))
φVar[e.varyingInds] = vφVar
# return the Shapley values along with variances of the estimates
e.fnull,φ,φVar
end
function ESValuesEstimator{T}(f::Function, X::Matrix{T}, link=identity; featureGroups=nothing, weights=nothing, nsamples=0)
P,N = size(X)
# give default values to omitted arguments
weights != nothing || (weights = ones(N))
weights ./= sum(weights)
featureGroups != nothing || (featureGroups = Array{Int64,1}[Int64[i] for i in 1:size(X)[1]])
featureGroups = convert(Array{Array{Int64,1},1}, featureGroups)
@assert length(weights) == N "Provided 'weights' must match the number of representative data points (size(X)[2])!"
ESValuesEstimator(
zeros(1),
f,
X,
link,
featureGroups,
weights,
nsamples,
Int64[],
Vector{Int64}[],
zeros(T, 1, 1),
zeros(MaskType, 1, 1),
zeros(MaskType, 1),
zeros(Float64, 1),
zeros(Float64, 1),
zeros(Float64, 1),
P,
N,
0,
0,
0,
0.0,
0.0
)
end
function allocate!{T}(e::ESValuesEstimator{T})
e.data = zeros(T, e.P, e.nsamples * e.N)
e.maskMatrix = zeros(MaskType, e.M-1, e.nsamples)
e.kernelWeights = zeros(MaskType, e.nsamples)
e.y = zeros(Float64, e.nsamples * e.N)
e.ey = zeros(Float64, e.nsamples)
e.lastMask = zeros(Float64, e.nsamples)
end
function addsample!(e::ESValuesEstimator, x, m, w)
offset = e.nsamplesAdded * e.N
e.nsamplesAdded += 1
for i in 1:e.N
for j in 1:e.M
for k in e.varyingFeatureGroups[j]
if m[j] == 1.0
e.data[k,offset+i] = x[k]
else
e.data[k,offset+i] = e.X[k,i]
end
end
end
end
e.maskMatrix[:,e.nsamplesAdded] = m[1:end-1] - m[end]
e.lastMask[e.nsamplesAdded] = m[end]
e.kernelWeights[e.nsamplesAdded] = w
end
function run!(e::ESValuesEstimator)
e.y[e.nsamplesRun*e.N+1:e.nsamplesAdded*e.N] = e.f(e.data[:,e.nsamplesRun*e.N+1:e.nsamplesAdded*e.N])
# find the expected value of each output
for i in e.nsamplesRun+1:e.nsamplesAdded
eyVal = 0.0
for j in 1:e.N
eyVal += e.y[(i-1)*e.N + j] * e.weights[j]
end
e.ey[i] = eyVal
e.nsamplesRun += 1
end
end
function solve!(e::ESValuesEstimator)
# adjust the y value according to the constraints for the offset and sum
eyAdj = e.link.(e.ey) .- e.lastMask*(e.link(e.fx) - e.link(e.fnull)) - e.link(e.fnull)
# solve a weighted least squares equation to estimate φ
tmp = e.maskMatrix .* e.kernelWeights'
tmp2 = inv(tmp*e.maskMatrix')
w = tmp2*(tmp*eyAdj)
wlast = (e.link(e.fx) - e.link(e.fnull)) - sum(w)
φ = [w; wlast]
yHat = e.maskMatrix'w
φVar = var(yHat .- eyAdj) * diag(tmp2)
φVar = [φVar; maximum(φVar)] # since the last weight is inferred we use a pessimistic guess of its variance
# a finite sample adjustment based on how much of the weight is left in the sample space
fractionWeightLeft = 1 - sum(e.kernelWeights)/sum([(e.M-1)/(s*(e.M-s)) for s in 1:e.M-1])
φ,φVar*fractionWeightLeft
end
"Identify which feature groups vary."
function varying_groups(x, X, featureGroups)
varying = zeros(length(featureGroups))
for (i,inds) in enumerate(featureGroups)
varying[i] = sum(vec(sum(x[inds] .== X[inds,:],1) .!= length(inds)))
end
find(varying)
end
end # module
| {"hexsha": "07d40665cf7965d0c36c078081ac0c6899317520", "size": 6893, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/ESValues.jl", "max_stars_repo_name": "slundberg/ESValues.jl", "max_stars_repo_head_hexsha": "0a3d6400915951d2c9a3d60f6b180224df922598", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2017-03-13T09:09:20.000Z", "max_stars_repo_stars_event_max_datetime": "2017-03-13T09:09:20.000Z", "max_issues_repo_path": "src/ESValues.jl", "max_issues_repo_name": "slundberg/ESValues.jl", "max_issues_repo_head_hexsha": "0a3d6400915951d2c9a3d60f6b180224df922598", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/ESValues.jl", "max_forks_repo_name": "slundberg/ESValues.jl", "max_forks_repo_head_hexsha": "0a3d6400915951d2c9a3d60f6b180224df922598", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.7723214286, "max_line_length": 123, "alphanum_fraction": 0.6172929058, "num_tokens": 2088} |
"""Prepare data for plotting"""
from lcmap_tap.logger import exc_handler, log
from lcmap_tap.Plotting import plot_functions
from lcmap_tap.RetrieveData.retrieve_ccd import CCDReader
from lcmap_tap.RetrieveData.retrieve_classes import SegmentClasses
import sys
import numpy as np
import datetime as dt
from collections import OrderedDict
from typing import Union
sys.excepthook = exc_handler
index_functions = {'ndvi': {'func': plot_functions.ndvi, 'bands': ('reds', 'nirs'), 'inds': (2, 3)},
'msavi': {'func': plot_functions.msavi, 'bands': ('reds', 'nirs'), 'inds': (2, 3)},
'evi': {'func': plot_functions.evi, 'bands': ('blues', 'reds', 'nirs'), 'inds': (0, 2, 3)},
'savi': {'func': plot_functions.savi, 'bands': ('reds', 'nirs'), 'inds': (2, 3)},
'ndmi': {'func': plot_functions.ndmi, 'bands': ('nirs', 'swir1s'), 'inds': (3, 4)},
'nbr': {'func': plot_functions.nbr, 'bands': ('nirs', 'swir2s'), 'inds': (3, 5)},
'nbr2': {'func': plot_functions.nbr2, 'bands': ('swir1s', 'swir2s'), 'inds': (4, 5)}
}
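def _index_dispatch_sketch(ard):
    # Sketch of how the dispatch table above is used (mirrors PlotSpecs.index_to_observations):
    # look up the function and the band keys it needs, then call it positionally.
    # `ard` is assumed to be a dict of per-band numpy arrays, e.g. ard['reds'], ard['nirs'].
    entry = index_functions['ndvi']
    args = tuple(ard[band] for band in entry['bands'])
    return entry['func'](*args)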
class PlotSpecs:
"""
Generate and retain the data required for plotting
"""
bands = ('blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'thermal')
def __init__(self, ard: dict, change: CCDReader, segs: SegmentClasses, items: list,
begin: dt.date = dt.date(year=1982, month=1, day=1),
end: dt.date = dt.date(year=2017, month=12, day=31)):
"""
Args:
ard: The ARD observations for a given point (ARDData.pixel_ard)
change: PyCCD results for a given point (CCDReader.results)
segs: Classification results (SegmentClasses.results)
begin: Beginning day of PyCCD
end: Ending day of PyCCD
"""
self.begin = begin
self.end = end
self.items = items
self.ard = self.make_arrays(ard)
self.dates = self.ard['dates']
try:
self.results = change.results
self.ccd_mask = np.array(self.results['processing_mask'], dtype=np.bool)
except (AttributeError, TypeError) as e:
# log.debug('Exception: %s' % e, exc_info=True)
log.info('No CCD results were found')
self.results = None
self.ccd_mask = []
try:
self.segment_classes = segs.results
except (AttributeError, TypeError) as e:
# log.debug('Exception: %s' % e, exc_info=True)
log.info('No classification results were found')
self.segment_classes = None
self.date_mask = self.mask_daterange(dates=self.dates,
start=begin,
stop=end)
self.dates_in = self.ard['dates'][self.date_mask]
self.dates_out = self.ard['dates'][~self.date_mask]
self.qa_mask = np.isin(self.ard['qas'], [66, 68, 322, 324])
self.fill_mask = np.isin(self.ard['qas'], [n for n in np.unique(self.ard['qas']) if n != 1])
self.fill_in = self.fill_mask[self.date_mask]
self.fill_out = self.fill_mask[~self.date_mask]
# # self.total_mask = np.logical_and(self.ccd_mask, self.fill_in)
# self.total_mask = np.logical_and(self.qa_mask[date_mask], self.fill_in)
# Check for presence of thermals, rescale if present
if 'thermals' in self.ard.keys():
self.rescale_thermal()
self.index_to_observations()
if self.results is not None:
self.predicted_values, \
self.prediction_dates, \
self.break_dates, \
self.start_dates, \
self.end_dates = self.get_modelled_specs(self.results)
else:
self.predicted_values = []
self.prediction_dates = []
self.break_dates = []
self.start_dates = []
self.end_dates = []
self.index_lookup, self.band_lookup, self.all_lookup = self.get_lookups(results=self.results,
predicted_values=self.predicted_values)
def get_modelled_specs(self, results):
band_info = {b: {'coefs': [], 'inter': [], 'pred': []} for b in self.bands}
predicted_values = []
prediction_dates = []
break_dates = []
start_dates = []
end_dates = []
for num, result in enumerate(results['change_models']):
days = np.arange(result['start_day'], result['end_day'] + 1)
break_dates.append(result['break_day'])
start_dates.append(result['start_day'])
end_dates.append(result['end_day'])
for b in self.bands:
band_info[b]['inter'] = result[b]['intercept']
band_info[b]['coefs'] = result[b]['coefficients']
band_info[b]['pred'] = self.predicts(days, result[b]['coefficients'], result[b]['intercept'])
prediction_dates.append(days)
predicted_values.append(band_info[b]['pred'])
return predicted_values, prediction_dates, break_dates, start_dates, end_dates
def get_lookups(self, results, predicted_values):
# Calculate indices from observed values
# Calculate indices from the results' change models
# The change models are stored by order of model, then
# band number. For example, the band values for the first change model are represented by indices 0-5,
# the second model by indices 6-11, and so on.
index_modeled = self.get_modeled_index(ard=self.ard, results=results, predicted_values=predicted_values)
index_lookup = OrderedDict([('NDVI', ('ndvi', 'ndvi-modeled')),
('MSAVI', ('msavi', 'msavi-modeled')),
('EVI', ('evi', 'evi-modeled')),
('SAVI', ('savi', 'savi-modeled')),
('NDMI', ('ndmi', 'ndmi-modeled')),
('NBR', ('nbr', 'nbr-modeled')),
('NBR-2', ('nbr2', 'nbr2-modeled'))])
index_lookup = [(key, (self.ard[index_lookup[key][0]],
index_modeled[index_lookup[key][1]]))
for key in index_lookup.keys()
if index_lookup[key][0] in self.ard.keys()]
index_lookup = OrderedDict(index_lookup)
lookup = OrderedDict([("Blue", ('blues', 0)),
("Green", ('greens', 1)),
("Red", ('reds', 2)),
("NIR", ('nirs', 3)),
("SWIR-1", ('swir1s', 4)),
("SWIR-2", ('swir2s', 5)),
("Thermal", ('thermals', 6))])
band_lookup = [(key, (self.ard[lookup[key][0]],
self.get_predicts(num=lookup[key][1], bands=self.bands,
predicted_values=predicted_values, results=results)))
for key in lookup.keys()
if lookup[key][0] in self.ard.keys()]
# Example of how the band_lookup is structured:
# self.band_lookup = [("Blue", (self.ard['blues'], self.get_predicts(0))),
# ("Green", (self.ard['greens'], self.get_predicts(1))),
# ("Red", (self.ard['reds'], self.get_predicts(2))),
# ("NIR", (self.ard['nirs'], self.get_predicts(3))),
# ("SWIR-1", (self.ard['swir1s'], self.get_predicts(4))),
# ("SWIR-2", (self.ard['swir2s'], self.get_predicts(5))),
# ("Thermal", (self.ard['thermals'], self.get_predicts(6)))]
band_lookup = OrderedDict(band_lookup)
# Combine these two dictionaries
all_lookup = plot_functions.merge_dicts(band_lookup, index_lookup)
return index_lookup, band_lookup, all_lookup
@staticmethod
def mask_daterange(dates: np.array, start: dt.date, stop: dt.date) -> np.array:
"""
        Create a mask that is True where dates fall within the given start and stop dates
Args:
dates: List or array of dates to check against
start: Begin date stored as a datetime.date object
stop: End date stored as a datetime.date object
Returns:
Array containing the locations of the truth condition
"""
return np.logical_and(dates >= start.toordinal(), dates < stop.toordinal())
@staticmethod
def predicts(days, coef, intercept):
"""
Calculate change segment curves
Args:
            days: Ordinal days over which to evaluate the model
            coef: Harmonic coefficients (slope plus three cosine/sine seasonal pairs)
            intercept: Model intercept
Returns:
"""
return (intercept + coef[0] * days +
coef[1] * np.cos(days * 1 * 2 * np.pi / 365.25) + coef[2] * np.sin(days * 1 * 2 * np.pi / 365.25) +
coef[3] * np.cos(days * 2 * 2 * np.pi / 365.25) + coef[4] * np.sin(days * 2 * 2 * np.pi / 365.25) +
coef[5] * np.cos(days * 3 * 2 * np.pi / 365.25) + coef[6] * np.sin(days * 3 * 2 * np.pi / 365.25))
@staticmethod
def get_predicts(num: Union[int, list], bands: tuple, predicted_values: list, results: dict) -> list:
"""
Return the model prediction values in the time series for a particular band or bands
Args:
            num: Band index or list of band indices to select
            bands: Tuple of band names used to stride through the predicted values
            predicted_values: Flat list of predicted segment curves (per model, per band)
            results: PyCCD results dict containing the change models
Returns:
A list of segment models
"""
# Check for type int, create list if true
if isinstance(num, int):
num = [num]
try:
_predicts = [predicted_values[m * len(bands) + n] for n in num
for m in range(len(results["change_models"]))]
except (IndexError, TypeError) as e:
log.error('Exception: %s' % e, exc_info=True)
_predicts = []
return _predicts
@staticmethod
def make_arrays(in_dict: dict) -> dict:
"""
Convert a dict of lists into arrays
Args:
in_dict:
Returns:
"""
for key in in_dict.keys():
if isinstance(in_dict[key], list):
in_dict[key] = np.array(in_dict[key])
return in_dict
def rescale_thermal(self):
"""
Fix the scaling of the Brightness Temperature, if it was selected for plotting
"""
temp_thermal = np.copy(self.ard['thermals'])
temp_thermal[self.fill_mask] = temp_thermal[self.fill_mask] * 10 - 27315
self.ard['thermals'] = np.copy(temp_thermal)
return None
def index_to_observations(self):
"""
        Add calculated index observations to the pixel's time-series rod
Returns:
"""
indices = ['NDVI', 'MSAVI', 'EVI', 'SAVI', 'NDMI', 'NBR', 'NBR-2']
selected_indices = [i for i in indices if i in self.items or 'All Indices' in self.items]
for i in selected_indices:
key = i.lower().replace('-', '')
call = index_functions[key]['func']
args = tuple([self.ard[band] for band in index_functions[key]['bands']])
self.ard[key] = call(*args)
return None
@staticmethod
def get_modeled_index(ard, results, predicted_values):
"""
Calculate the model-predicted index curves
Returns:
"""
bands = ('blue', 'green', 'red', 'nir', 'swir1', 'swir2', 'thermal')
indices = ('ndvi', 'msavi', 'evi', 'savi', 'ndmi', 'nbr', 'nbr2')
modeled = dict()
for key in ard.keys():
if key in indices:
new_key = f'{key}-modeled'
modeled[new_key] = list()
call = index_functions[key]['func']
inds = index_functions[key]['inds']
try:
for m in range(len(results['change_models'])):
args = tuple([predicted_values[m * len(bands) + ind] for ind in inds])
modeled[new_key].append(call(*args))
except (AttributeError, TypeError) as e:
log.error('Exception: %s' % e, exc_info=True)
modeled[new_key].append([])
return modeled
| {"hexsha": "32066e444a20db59132c8cc993f7c14668437a33", "size": 12832, "ext": "py", "lang": "Python", "max_stars_repo_path": "lcmap_tap/Plotting/plot_specs.py", "max_stars_repo_name": "danzelenak-usgs/TAP_Tool", "max_stars_repo_head_hexsha": "4ae3f4105df47efaa57283b25019e398b32de7b9", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2018-09-05T04:29:27.000Z", "max_stars_repo_stars_event_max_datetime": "2020-01-08T19:18:50.000Z", "max_issues_repo_path": "lcmap_tap/Plotting/plot_specs.py", "max_issues_repo_name": "danzelenak-usgs/TAP_Tool", "max_issues_repo_head_hexsha": "4ae3f4105df47efaa57283b25019e398b32de7b9", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-08-10T18:32:50.000Z", "max_issues_repo_issues_event_max_datetime": "2018-08-10T18:32:50.000Z", "max_forks_repo_path": "lcmap_tap/Plotting/plot_specs.py", "max_forks_repo_name": "danzelenak-usgs/TAP_Tool", "max_forks_repo_head_hexsha": "4ae3f4105df47efaa57283b25019e398b32de7b9", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2018-04-25T14:01:07.000Z", "max_forks_repo_forks_event_max_datetime": "2018-04-25T14:01:07.000Z", "avg_line_length": 36.6628571429, "max_line_length": 120, "alphanum_fraction": 0.5175342893, "include": true, "reason": "import numpy", "num_tokens": 2973} |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Discriminative Layer Training Manager for TensorFlow."""
import tensorflow as tf
import numpy as np
from tensorflow_addons.optimizers.discriminative_layer_training import (
DiscriminativeLayerOptimizer,
)
import itertools
import os
import tempfile
import pytest
import sys
def toy_cnn():
"""Consistently create model with same random weights.
Skip head activation to allow both bce with logits and cce with logits.
The model returned by this function should have identical weights to all
other models returned by this function, for the duration of that
continuous integration run.
Run this function within a test, but make sure it runs before other tests.
Model is intended to work with
x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32)
y = np.zeros(shape = (None, 5), dtype = np.float32)
y[:, 0] = 1.
"""
cnn_model_path = os.path.join(tempfile.gettempdir(), "cnn.h5")
if not os.path.exists(cnn_model_path):
bignet = tf.keras.applications.mobilenet_v2.MobileNetV2(
include_top=False, weights=None, input_shape=(32, 32, 3), pooling="avg"
)
# Take the first few layers so we cover BN, Conv, Pooling ops for testing.
net = tf.keras.models.Model(
inputs=bignet.input, outputs=bignet.get_layer("block_2_add").output
)
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
net,
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(5, name="head"),
]
)
model.save(cnn_model_path)
# This creates a model with set weights for testing purposes.
# Most tests will assert equivalency between a model with discriminative training and a model without.
return tf.keras.models.load_model(cnn_model_path)
else:
        assert os.path.exists(cnn_model_path), (
"Could not find h5 file at path %s " % cnn_model_path
)
# Load the variable initialized model from the disk.
return tf.keras.models.load_model(cnn_model_path)
def toy_rnn():
"""Consistently create model with same random weights.
Skip head activation to allow both bce with logits and cce with logits.
The model returned by this function should have identical weights to all
other models returned by this function, for the duration of that
continuous integration run.
Run this function within a test, but make sure it runs before other tests.
Model is intended to work with
x = np.ones(shape = (None, 32, 32, 3), dtype = np.float32)
y = np.zeros(shape = (None, 5), dtype = np.float32)
y[:, 0] = 1.
"""
rnn_model_path = os.path.join(tempfile.gettempdir(), "rnn.h5")
if not os.path.exists(rnn_model_path):
# Pretend this net is a pretrained lstm of some sort.
net = tf.keras.Sequential()
# Crop the input shape so the lstm runs faster.
# Pretrained need inputshape for weights to be initialized.
net.add(
tf.keras.layers.Cropping2D(
cropping=((8, 8), (12, 12)), input_shape=(32, 32, 3)
)
)
# Reshape into a timeseries.
net.add(tf.keras.layers.Reshape(target_shape=(16, 8 * 3)))
# Reduce the length of the time series.
net.add(tf.keras.layers.Cropping1D(cropping=(0, 5)))
# We are primarily interested in the bidir lstm layer and its behavior.
net.add(tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(4)))
model = tf.keras.Sequential(
[
tf.keras.layers.InputLayer(input_shape=(32, 32, 3)),
net,
tf.keras.layers.Dropout(0.5),
tf.keras.layers.Dense(5, name="head"),
]
)
model.save(rnn_model_path)
# This creates a model with set weights for testing purposes.
# Most tests will assert equivalency between a model with discriminative training and a model without.
return tf.keras.models.load_model(rnn_model_path)
else:
        assert os.path.exists(rnn_model_path), (
"Could not find h5 file at path %s " % rnn_model_path
)
# Load the variable initialized model from the disk
return tf.keras.models.load_model(rnn_model_path)
def _get_train_results(model, verbose=False, epochs=10):
"""Run a training loop and return the results for analysis.
Model must be compiled first.
Training data sizes reduced.
"""
tf.random.set_seed(1)
x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32)
y = np.zeros(shape=(8, 5), dtype=np.float32)
y[:, 0] = 1.0
return model.fit(x, y, epochs=epochs, batch_size=4, verbose=verbose, shuffle=False)
def _zipped_permutes():
model_fns = [
# Generally, we want to test that common layers function correctly with discriminative layer training.
# Dense, conv2d, batch norm, lstm, pooling, should cover the majority of layer types.
# We also assume that if it works for conv2d, it should work for conv3d by extension.
# Apply the same extension logic for all layers tested and it should cover maybe 90% of layers in use?
toy_cnn,
toy_rnn,
]
losses = [
# Additional loss types do not need to be tested.
# This is because losses affect the gradient tape, which is computed before
        # the apply_gradients step. This means that the same gradient value is passed on to each opt
# and the gradient calculation is unaffected by which optimizer you are using.
tf.keras.losses.CategoricalCrossentropy(from_logits=True),
]
optimzers = [
# Additional optimizers can be added for testing.
# However, testing adam should cover most optimizer behaviours because it uses momentum.
tf.keras.optimizers.Adam,
]
return list(itertools.product(model_fns, losses, optimzers))
def get_losses(hist):
return np.array(hist.__dict__["history"]["loss"])
def _assert_losses_are_close(hist, hist_lr):
"""Higher tolerance for graph and distributed bc unable to run deterministically."""
if not tf.executing_eagerly() or tf.distribute.has_strategy():
rtol, atol = 0.05, 1.00
# print('graph or dist')
else:
rtol, atol = 0.01, 0.01
return np.testing.assert_allclose(
get_losses(hist), get_losses(hist_lr), rtol=rtol, atol=atol
)
def _assert_training_losses_are_close(model, model_lr, epochs=10):
"""Easy way to check if two models train in almost the same way.
Epochs set to 10 by default to allow momentum methods to pick up momentum and diverge,
if the disc training is not working.
"""
hist = _get_train_results(model, verbose=False, epochs=epochs)
hist_lr = _get_train_results(model_lr, verbose=False, epochs=epochs)
_assert_losses_are_close(hist, hist_lr)
def test_a_initialize_model_weights():
"""This test should run first to initialize the model weights.
There seem to be major issues in initializing model weights on the fly when testing,
so we initialize them and save them to an h5 file and reload them each time.
This ensures that when comparing two runs, they start at the same place.
This is not actually testing anything, so it does not need to run in eager and graph.
This needs to run distributed or else it will cause the cannot modify virtual devices error."""
toy_cnn()
toy_rnn()
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def test_equal_with_no_layer_lr(model_fn, loss, opt):
"""Confirm that discriminative learning is almost the same as regular learning."""
learning_rate = 0.01
model = model_fn()
model.compile(loss=loss, optimizer=opt(learning_rate))
model_lr = model_fn()
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
_assert_training_losses_are_close(model, model_lr)
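def _discriminative_usage_sketch():
    # Minimal end-user sketch of the pattern exercised by the tests above
    # (the two-layer model is hypothetical; this is not itself a test).
    # Assign `lr_mult` to the layers that should train more slowly, then wrap
    # the base optimizer class with DiscriminativeLayerOptimizer and compile.
    model = tf.keras.Sequential(
        [
            tf.keras.layers.Dense(8, input_shape=(4,), name="body"),
            tf.keras.layers.Dense(1, name="head"),
        ]
    )
    model.layers[0].lr_mult = 0.1  # body trains at 10% of the base learning rate
    d_opt = DiscriminativeLayerOptimizer(
        tf.keras.optimizers.Adam, model, verbose=False, learning_rate=0.01
    )
    model.compile(loss="mse", optimizer=d_opt)
    return model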
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_equal_0_sub_layer_lr_to_sub_layer_trainable_false(model_fn, loss, opt):
"""Confirm 0 lr_mult for the a specific layer is the same as setting layer to not trainable.
This also confirms that lr_mult propagates into that layer's trainable variables.
This also confirms that lr_mult does not propagate to the rest of the layers unintentionally.
"""
learning_rate = 0.01
model = model_fn()
# Layers 0 represents the pretrained network
model.layers[0].trainable = False
model.compile(loss=loss, optimizer=opt(learning_rate))
model_lr = model_fn()
model_lr.layers[0].lr_mult = 0.0
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
_assert_training_losses_are_close(model, model_lr)
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_equal_0_layer_lr_to_trainable_false(model_fn, loss, opt):
"""Confirm 0 lr_mult for the model is the same as model not trainable.
This also confirms that lr_mult on the model level is propagated to all sublayers and their variables.
"""
learning_rate = 0.01
model = model_fn()
model.trainable = False
model.compile(loss=loss, optimizer=opt(learning_rate))
model_lr = model_fn()
model_lr.lr_mult = 0.0
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
# Only two epochs because we expect no training to occur, thus losses shouldn't change anyways.
_assert_training_losses_are_close(model, model_lr, epochs=2)
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_equal_half_layer_lr_to_half_lr_of_opt(model_fn, loss, opt):
"""Confirm 0.5 lr_mult for the model is the same as optim with 0.5 lr.
This also confirms that lr_mult on the model level is propagated to all sublayers and their variables.
"""
mult = 0.5
learning_rate = 0.01
model = model_fn()
model.compile(loss=loss, optimizer=opt(learning_rate * mult))
model_lr = model_fn()
model_lr.lr_mult = mult
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
_assert_training_losses_are_close(model, model_lr)
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_sub_layers_keep_lr_mult(model_fn, loss, opt):
"""Confirm that model trains with lower lr on specific layer,
while a different lr_mult is applied everywhere else.
Also confirms that sub layers with an lr mult do not get overridden.
"""
learning_rate = 0.01
model_lr = model_fn()
# We set model to lrmult 0 and layer one to lrmult 5.
# If layer one is trainable, then the loss should decrease.
model_lr.lr_mult = 0.00
model_lr.layers[-1].lr_mult = 3
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
loss_values = get_losses(_get_train_results(model_lr, epochs=5))
np.testing.assert_array_less([loss_values[-1]], [loss_values[0]])
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_variables_get_assigned(model_fn, loss, opt):
"""Confirm that variables do get an lr_mult attribute and that they get the correct one.
"""
learning_rate = 0.01
model_lr = model_fn()
# set lr mults.
model_lr.layers[0].lr_mult = 0.3
model_lr.layers[0].layers[-1].lr_mult = 0.1
model_lr.layers[-1].lr_mult = 0.5
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
# We expect trainable vars at 0.3 to be reduced by the amount at 0.1.
# This tests that the 0.3 lr mult does not override the 0.1 lr mult.
np.testing.assert_equal(
len(model_lr.layers[0].trainable_variables)
- len(model_lr.layers[0].layers[-1].trainable_variables),
len([var for var in model_lr.trainable_variables if var.lr_mult == 0.3]),
)
# We expect trainable vars of model with lr_mult 0.1 to equal trainable vars of that layer.
np.testing.assert_equal(
len(model_lr.layers[0].layers[-1].trainable_variables),
len([var for var in model_lr.trainable_variables if var.lr_mult == 0.1]),
)
# Same logic as above.
np.testing.assert_equal(
len(model_lr.layers[-1].trainable_variables),
len([var for var in model_lr.trainable_variables if var.lr_mult == 0.5]),
)
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_model_checkpoint(model_fn, loss, opt):
"""Confirm that model does save checkpoints and can load them properly."""
learning_rate = 0.01
model_lr = model_fn()
model_lr.layers[0].lr_mult = 0.3
model_lr.layers[0].layers[-1].lr_mult = 0.1
model_lr.layers[-1].lr_mult = 0.5
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
x = np.ones(shape=(8, 32, 32, 3), dtype=np.float32)
y = np.zeros(shape=(8, 5), dtype=np.float32)
y[:, 0] = 1.0
filepath = os.path.join(tempfile.gettempdir(), model_fn.__name__ + "_cp.ckpt")
callbacks = [
tf.keras.callbacks.ModelCheckpoint(
filepath=filepath, save_weights_only=True, verbose=1
)
]
model_lr.fit(
x, y, epochs=2, batch_size=4, verbose=False, shuffle=False, callbacks=callbacks,
)
# If this doesn't error out, then loading and checkpointing should be fine.
model_lr.load_weights(filepath=filepath)
@pytest.mark.parametrize("model_fn,loss,opt", _zipped_permutes())
@pytest.mark.usefixtures("maybe_run_functions_eagerly")
def _test_config_tofrom(model_fn, loss, opt):
"""Confirm that optimizer saves config and loads config."""
# build model and save the opt to a config as c.
learning_rate = 0.01
model_lr = model_fn()
model_lr.layers[0].lr_mult = 0.3
model_lr.layers[0].layers[-1].lr_mult = 0.1
model_lr.layers[-1].lr_mult = 0.5
d_opt = DiscriminativeLayerOptimizer(
opt, model_lr, verbose=False, learning_rate=learning_rate
)
model_lr.compile(loss=loss, optimizer=d_opt)
c = d_opt.get_config()
# reconstruct the model and then build the opt from config.
model_lr = model_fn()
model_lr.layers[0].lr_mult = 0.3
model_lr.layers[0].layers[-1].lr_mult = 0.1
model_lr.layers[-1].lr_mult = 0.5
d_opt_from_config = DiscriminativeLayerOptimizer.from_config(c, model_lr)
model_lr.compile(loss=loss, optimizer=d_opt_from_config)
# we expect both optimizers to have the same optimizer group and base optimizer.
np.testing.assert_equal(
len(d_opt.optimizer_group), len(d_opt_from_config.optimizer_group)
)
np.testing.assert_equal(d_opt.opt_class, d_opt_from_config.opt_class)
# we also expect the lr for each opt in the opt groups to be the same. Also confirms same lr mult.
np.testing.assert_array_equal(
[opt.learning_rate for opt in d_opt.optimizer_group],
[opt.learning_rate for opt in d_opt_from_config.optimizer_group],
)
if __name__ == "__main__":
sys.exit(pytest.main([__file__]))
| {"hexsha": "dc7643de84162e65ebe24a743b7a5c4f448d7a3b", "size": 16829, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_addons/optimizers/discriminative_layer_training_test.py", "max_stars_repo_name": "dkamotsky/addons", "max_stars_repo_head_hexsha": "56ff850785c6caa8c18c2859e32a32c8902defea", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tensorflow_addons/optimizers/discriminative_layer_training_test.py", "max_issues_repo_name": "dkamotsky/addons", "max_issues_repo_head_hexsha": "56ff850785c6caa8c18c2859e32a32c8902defea", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_addons/optimizers/discriminative_layer_training_test.py", "max_forks_repo_name": "dkamotsky/addons", "max_forks_repo_head_hexsha": "56ff850785c6caa8c18c2859e32a32c8902defea", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.9887133183, "max_line_length": 110, "alphanum_fraction": 0.6943965773, "include": true, "reason": "import numpy", "num_tokens": 4065} |
###############################################################################################
#
# Extract raw Markdown from source files and turn them into docs
#
# - Use `names(module)` to get a list of exported symbols from the module
# - Use __META__ to get all documented symbols along with path to the sourcefile
# - Use `grep` to get line number in file
# - because MD.meta only has a line number for Function, not for Module or DataType
# - because even when there is a line number, it is off by one
# - Use readlines to read in the source file and pull out docs & end line number
# - because julia compiles docs to incorrect Markdown objects, and discards the raw MD
# - Re-execute all docstrings because they may include external files
# - Write .md files and a TOC, add links back to repo with start/end lines for each object
# - Create mkdocs.yml
# - Run `mkdocs` to build html docs
# - Cleanup (optional)
#
#
# -- Because julia's own documentation libraries cannot handle all of markdown
#
###############################################################################################
###############################################################################################
###
### The following constants may be modified based on your own config
###
# Module to be documented
const mod = "mPulseAPI"
# Temporary location for generated files, change this if you already have a directory called src/
const doc_src = "src"
# YAML config file for mkdocs. This will be generated.
const mkdocsy = "mkdocs.yml"
# Prefix configuration for mkdocs. Page names will be appended to this
const mkdocs_config = Dict(
:site_name => "$(mod).jl Documentation",
:site_url => "https://akamai.github.com/$(mod).jl/",
:repo_url => "https://github.com/akamai/$(mod).jl/",
:site_favicon => "favicon.ico",
:extra_css => ["css/mkdocs.css"],
:site_description => "Communicate with the mPulse Query & Repository REST APIs to fetch information about tenants and apps.",
:copyright => "Akamai, Inc.",
:docs_dir => "src",
:use_directory_urls => false,
:theme => "readthedocs",
:markdown_extensions => [:admonition, :def_list, :attr_list, "toc:\n permalink: True"],
)
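# For illustration only: with the settings above, the mkdocs.yml writer below emits
# plain "key: value" lines, expanding array values into list items, e.g.
# (key order follows Dict iteration and may vary):
#
#   site_name: mPulseAPI.jl Documentation
#   repo_url: https://github.com/akamai/mPulseAPI.jl/
#   extra_css:
#    - css/mkdocs.css
#   theme: readthedocs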
# Don't change this
immutable Page
name::AbstractString
title::AbstractString
pregenerated::Bool
Page(name::AbstractString, title::AbstractString, pregenerated::Bool=false) = new(name, title, pregenerated)
end
# Pages to build:
# - name
# - title
const pages = Page[
Page("index", mod),
Page("apiToken", "How to generate an API Token", true),
Page("RepositoryAPI", "Repository API"),
Page("QueryAPI", "Query API"),
Page("exceptions", "Exceptions"),
Page("cache_utilities", "Internal Cache Utilities"),
]
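# Note: every page that is not marked pregenerated is expected to have a matching
# <name>.jl source file (index maps to mPulseAPI.jl); its documented symbols are
# pulled from that file when the page is generated below.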
###
### End of user configurable section
###
###############################################################################################
using Formatting
eval(parse("using $mod"))
Mod = eval(parse(mod))
function getSymbols(mod::Module; order=[Module, DataType, Function])
exported = Dict( map( n -> (string(n) => getfield(mod, n)), names(mod) ) )
declarator = Dict(Function => "(function )?", DataType => "(abstract|immutable|type) ", Module => "module ")
# Now get all the symbols and mark the exported ones
function symbol2dict(k)
k_doc = Docs.doc(k)
name = replace(string(k), Regex("^$mod\\."), "")
typ = typeof(k)
file = haskey(k_doc.meta, :path) ?
k_doc.meta[:path] :
length(k_doc.content) == 1 && haskey(k_doc.content[1].meta, :path) ?
k_doc.content[1].meta[:path] :
""
api_doc = ""
if file != ""
lines = open(readlines, file)
line = find(x -> ismatch(Regex("^$(declarator[typ])$(name)"), x), lines)
if length(line) == 0
println(Regex("^$(declarator[typ])$(name)"))
println(lines)
end
line = line[1]
if typ == Module
endline = line
api_doc_start = findnext(lines, "\"\"\"\n", line+1)
api_doc_end = findnext(lines, "\"\"\"\n", api_doc_start+1)
if api_doc_end - api_doc_start > 0
api_doc = eval(parse(join(lines[api_doc_start:api_doc_end], "")))
end
else
endline = findnext(lines, "end\n", line)
api_doc = findprev(lines, "\"\"\"\n", line-2)
if api_doc > 0
api_doc = eval(parse(join(lines[api_doc:line-1], "")))
end
end
else
line = 0
end
api_doc = replace(api_doc, r"^ \*"m, " *")
return Dict(
:name => name,
:type => typ,
:exported => haskey(exported, replace(string(k), Regex("^$mod\\."), "")),
:file => replace(file, r"^.*/", ""),
:line => line,
:endline => endline,
:doc => api_doc
)
end
symbols = map( symbol2dict, filter( k -> !isa(k, ObjectIdDict), collect( keys(mod.__META__) ) ) )
expo_order = Dict(true => "1", false => "2")
type_order = Dict(zip(order, 1:length(order)))
sort!(symbols, by = x -> format("{1}.{2}.{3}.{4:04d}.{5}", expo_order[x[:exported]], type_order[x[:type]], x[:file], x[:line], x[:name]))
return symbols
end
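# getSymbols returns one Dict per documented symbol (:name, :type, :exported, :file,
# :line, :endline, :doc), sorted so that exported symbols come first, then grouped by
# type, source file and line.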
labels = Pair[Module => "", DataType => "Type", Function => "Function"]
exps = Pair[true => "Exported", false => "Namespaced"]
symbols = getSymbols(Mod)
refids = Dict(map(s -> (s[:name] => replace(s[:file], r"\.jl$", ".md") * lowercase(string("#", s[:type], "-", s[:name]))), symbols))
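# refids maps a symbol name to an anchor in its generated page; for a Function `Foo`
# documented in QueryAPI.jl (name chosen only for illustration) the entry would be
# "QueryAPI.md#function-foo".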
function replace_refs(m)
ref = match(Regex("^\\[(`?(?:$mod\\.)?(\\w+)`?)\\]"), m)
if ref == nothing
return m
end
txt = ref.captures[1]
ref = ref.captures[2]
return "[$txt]($(refids[ref])){: .x-ref}"
end
cd(dirname(@__FILE__)) do
dir_existed = true
if !isdir(doc_src)
mkdir(doc_src)
dir_existed = false
end
open(mkdocsy, "w") do yml
for (k, v) in mkdocs_config
print(yml, k, ": ")
if isa(v, AbstractArray)
println(yml, mapfoldl(x -> "\n - $x", *, v))
else
println(yml, v)
end
end
println(yml, "pages:")
for page in pages
println("INFO - Processing $(page.name)")
# Only generate pages that are not pre-generated
page.pregenerated || open(joinpath(doc_src, page.name * ".md"), "w") do f
println(f, """
# $(page.title)
""")
local file_symbs = filter(x -> x[:file] == (page.name == "index" ? mod : page.name) * ".jl", symbols)
if page.name != "index"
for s in file_symbs
println(f, "* [$(s[:name])]($(refids[s[:name]]))")
end
end
for (exported, ex_label) in exps
for (typ, ty_label) in labels
local symbs = filter(x -> x[:exported] == exported && x[:type] == typ, file_symbs)
if length(symbs) > 0
!isempty(ty_label) && println(f, "## $ex_label $(ty_label)s")
exported || println(f, """
!!! note
The following methods are not exported by default. You may use them by explicitly
importing them or by prefixing them with the `$(mod).` namespace.
""")
for s in symbs
println(f, """
##$(typ == Module ? "" : "#") $(lowercase(string(s[:type]))) `$(s[:name])`
[$(s[:file])#$(s[:line])$(s[:endline] != s[:line] ? "-$(s[:endline])" : "")]($(mkdocs_config[:repo_url])tree/master/src/$(s[:file])#L$(s[:line])-L$(s[:endline])){: .source-link}
""")
# Replace references with links to actual functions
s_doc = replace(s[:doc], Regex("\\[`?(?:$mod\\.)?\\w+`?\\]\\(@ref\\)"), replace_refs)
s_doc = replace(s_doc, r"^`(\w+)`"m, m -> (r = replace(m, "`", ""); haskey(refids, r) ? "[$m]($(refids[r]))" : m))
# Remove `docs/src/` from any links since we might have that in raw md in our functions
s_doc = replace(s_doc, r"/?docs/src/", "")
println(f, s_doc, "\n---\n")
end
end
end
end
if page.name == "index"
println(f, """
## API Reference
""")
p = Page("", "")
for s in symbols
(s[:file] == mod * ".jl") && continue
if p.name * ".jl" != s[:file]
p = filter(p -> p.name * ".jl" == s[:file], pages)
if length(p) == 0
continue
else
p = p[1]
end
println(f, """
* [$(p.title)]($(p.name).md)
""")
end
println(f, " * [$(s[:name])]($(refids[s[:name]]))")
end
end
end
println(yml, " - \"$(page.title)\": \"$(page.name).md\"")
end
end
run(`mkdocs build -c -f $mkdocsy`)
if any(x -> x=="--delete", ARGS)
for p in pages
path = joinpath(doc_src, p.name * ".md")
println("INFO - Removing $path")
rm(path)
end
if !dir_existed
println("INFO - Removing $doc_src/")
rm(doc_src)
end
println("INFO - Removing $mkdocsy")
rm(mkdocsy)
end
if any(x -> x=="--add", ARGS)
info("Adding all documentation changes in $(doc_src) to this commit.")
success(`git add $(doc_src)`)
end
end
| {"hexsha": "310c071f2078898eab06ade401a8ad54c94d1f38", "size": 10695, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "docs/build-docs.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/mPulseAPI.jl-9f4a2347-a52b-52c3-b18a-5631cfb5128d", "max_stars_repo_head_hexsha": "1c55c1d5f3c0b9b3d3e4067cf3797ef522c4f2f7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-25T00:57:25.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-25T00:57:25.000Z", "max_issues_repo_path": "docs/build-docs.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/mPulseAPI.jl-9f4a2347-a52b-52c3-b18a-5631cfb5128d", "max_issues_repo_head_hexsha": "1c55c1d5f3c0b9b3d3e4067cf3797ef522c4f2f7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2017-08-19T06:17:26.000Z", "max_issues_repo_issues_event_max_datetime": "2018-05-14T19:14:45.000Z", "max_forks_repo_path": "docs/build-docs.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/mPulseAPI.jl-9f4a2347-a52b-52c3-b18a-5631cfb5128d", "max_forks_repo_head_hexsha": "1c55c1d5f3c0b9b3d3e4067cf3797ef522c4f2f7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2016-10-02T07:59:26.000Z", "max_forks_repo_forks_event_max_datetime": "2018-02-26T18:27:24.000Z", "avg_line_length": 34.5, "max_line_length": 213, "alphanum_fraction": 0.4636746143, "num_tokens": 2507} |
/**************************************************************
* Copyright (c) 2008-2009 Daniel Pfeifer *
* *
* Distributed under the Boost Software License, Version 1.0. *
**************************************************************/
#ifndef BOOST_SQL_FIREBIRD_CONNECTION_HPP
#define BOOST_SQL_FIREBIRD_CONNECTION_HPP
#include <boost/sql/firebird/detail/service.hpp>
#include <boost/sql/detail/connection_base.hpp>
#include <string>
#include <stdexcept>
#include <boost/assert.hpp>
#include <sstream>
#include <ibase.h>
namespace boost
{
namespace sql
{
namespace firebird
{
class connection: sql::detail::connection_base<detail::service>
{
public:
connection(asio::io_service& io_service) :
sql::detail::connection_base<detail::service>(io_service), impl(0)
{
}
~connection()
{
isc_detach_database(service.status, &impl);
}
void open(std::string const& db_name, std::string const& parm_buffer)
{
if (isc_attach_database(service.status, db_name.length(),
db_name.c_str(), &impl, parm_buffer.length(),
parm_buffer.c_str()))
{
service.throw_error();
}
}
unsigned long client_version()
{
return 100 * isc_get_client_major_version()
+ isc_get_client_minor_version();
}
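// Server-side version lookup is not implemented for this backend yet; it reports 0.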
unsigned long server_version()
{
return 0;
}
void execute(const std::string& query)
{
}
private:
isc_db_handle impl;
};
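// Minimal usage sketch (illustrative only -- the database path and parameter
// buffer below are placeholders, not values mandated by this class):
//
//   boost::asio::io_service io;
//   boost::sql::firebird::connection conn(io);
//   conn.open("/var/db/employee.fdb", dpb); // dpb: a previously built parameter buffer string
//   unsigned long version = conn.client_version();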
} // end namespace firebird
} // end namespace sql
} // end namespace boost
#endif /*BOOST_SQL_FIREBIRD_CONNECTION_HPP*/
| {"hexsha": "aa0c27a1e0fcb2a87d1f2abd0d74f8376e6ffc21", "size": 1551, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "include/boost/sql/firebird/connection.hpp", "max_stars_repo_name": "purpleKarrot/async-db", "max_stars_repo_head_hexsha": "172124e5657e3085e8ac7729c4e578c0d766bf7b", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 5.0, "max_stars_repo_stars_event_min_datetime": "2016-09-19T01:02:33.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-23T07:15:00.000Z", "max_issues_repo_path": "include/boost/sql/firebird/connection.hpp", "max_issues_repo_name": "purpleKarrot/async-db", "max_issues_repo_head_hexsha": "172124e5657e3085e8ac7729c4e578c0d766bf7b", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "include/boost/sql/firebird/connection.hpp", "max_forks_repo_name": "purpleKarrot/async-db", "max_forks_repo_head_hexsha": "172124e5657e3085e8ac7729c4e578c0d766bf7b", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.5416666667, "max_line_length": 70, "alphanum_fraction": 0.6305609284, "num_tokens": 342} |
\\ Count down from x
: cnt_down ( x -- )
DUP 0 >
IF 1- DUP . RECURSE
ELSE DROP
THEN
;
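\\ Example (illustrative): 5 cnt_down  prints 4 3 2 1 0 and leaves the stack empty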
| {"hexsha": "700a3a7f1c4094a85a0c688ba7665624b89e2fae", "size": 100, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "release/cnt_down.f", "max_stars_repo_name": "jpoirier/meusForth", "max_stars_repo_head_hexsha": "db7ba3c17f1c672c4c35415a6a4e2e45e79b19ac", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2017-08-03T08:47:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-16T23:10:35.000Z", "max_issues_repo_path": "release/cnt_down.f", "max_issues_repo_name": "jpoirier/meusForth", "max_issues_repo_head_hexsha": "db7ba3c17f1c672c4c35415a6a4e2e45e79b19ac", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-03-02T12:14:08.000Z", "max_issues_repo_issues_event_max_datetime": "2020-06-28T18:25:43.000Z", "max_forks_repo_path": "release/cnt_down.f", "max_forks_repo_name": "jpoirier/meusForth", "max_forks_repo_head_hexsha": "db7ba3c17f1c672c4c35415a6a4e2e45e79b19ac", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 10.0, "max_line_length": 32, "alphanum_fraction": 0.44, "num_tokens": 35} |
{-# OPTIONS --without-K #-}
module sets.list.properties where
open import level
open import equality.core
open import sets.list.core
module _ {i}{A : Set i} where
data all {j}(P : A → Set j) : List A → Set (i ⊔ j) where
mk-all : ∀ {x xs} → P x → all P xs → all P (x ∷ xs)
data any {j}(P : A → Set j) : List A → Set (i ⊔ j) where
hd-any : ∀ {x xs} → P x → any P (x ∷ xs)
tl-any : ∀ {x xs} → any P xs → any P (x ∷ xs)
elem : A → List A → Set i
elem x = any (λ x' → x ≡ x')
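-- For instance, elem x (x ∷ xs) is inhabited by hd-any refl, and membership
-- in the tail is witnessed by wrapping a proof with tl-any.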
| {"hexsha": "e80a524abddfd57ad0d2b7fa1632b1125678f138", "size": 495, "ext": "agda", "lang": "Agda", "max_stars_repo_path": "src/sets/list/properties.agda", "max_stars_repo_name": "pcapriotti/agda-base", "max_stars_repo_head_hexsha": "bbbc3bfb2f80ad08c8e608cccfa14b83ea3d258c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2015-06-12T12:20:17.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-01T11:25:54.000Z", "max_issues_repo_path": "src/sets/list/properties.agda", "max_issues_repo_name": "pcapriotti/agda-base", "max_issues_repo_head_hexsha": "bbbc3bfb2f80ad08c8e608cccfa14b83ea3d258c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2015-02-02T14:32:16.000Z", "max_issues_repo_issues_event_max_datetime": "2016-10-26T11:57:26.000Z", "max_forks_repo_path": "src/sets/list/properties.agda", "max_forks_repo_name": "pcapriotti/agda-base", "max_forks_repo_head_hexsha": "bbbc3bfb2f80ad08c8e608cccfa14b83ea3d258c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2015-02-02T12:17:00.000Z", "max_forks_repo_forks_event_max_datetime": "2019-05-04T19:31:00.000Z", "avg_line_length": 27.5, "max_line_length": 58, "alphanum_fraction": 0.5373737374, "num_tokens": 185} |