prompt (string, 19-1.03M chars) | completion (string, 4-2.12k chars) | api (string, 8-90 chars)
---|---|---|
import math
from datetime import date, datetime
import pandas as pd
from airflow.models import Variable
from airflow.operators.bash import BashOperator
from airflow.operators.python_operator import PythonOperator
from minio import Minio
from sqlalchemy.engine import create_engine
from airflow import DAG
DEFAULT_ARGS = {
"owner": "Airflow",
"depends_on_past": False,
"start_date": datetime(2021, 1, 13),
}
dag = DAG(
"etl_time_in_company_att",
default_args=DEFAULT_ARGS,
schedule_interval="@once",
)
data_lake_server = Variable.get("data_lake_server")
data_lake_login = Variable.get("data_lake_login")
data_lake_password = Variable.get("data_lake_password")
database_server = Variable.get("database_server")
database_login = Variable.get("database_login")
database_password = Variable.get("database_password")
database_name = Variable.get("database_name")
url_connection = "mysql+pymysql://{}:{}@{}/{}".format(
str(database_login),
str(database_password),
str(database_server),
str(database_name),
)
engine = create_engine(url_connection)
client = Minio(
data_lake_server,
access_key=data_lake_login,
secret_key=data_lake_password,
secure=False,
)
def extract():
# query to fetch the data.
query = """SELECT hire_date
FROM employees;"""
df_ = pd.read_sql_query(query, engine)
# persist the files in the Staging area.
df_.to_csv("/tmp/time_in_company.csv", index=False)
def transform():
# load the data from the staging area.
df_ = pd.read_csv("/tmp/time_in_company.csv")
# convert the data to datetime format.
df_["hire_date"] = | pd.to_datetime(df_["hire_date"]) | pandas.to_datetime |
#!/usr/bin/python3
# -*- coding: utf-8 -*-
#
# ./list-files.py course_id
#
# outputs a summary of the files in a course
# also outputs an xlsx file of the form: files-course_id.xlsx
#
#
#
# with the option "-v" or "--verbose" you get lots of output - showing in detail the operations of the program
#
# Can also be called with an alternative configuration file:
# ./list_your_courses.py --config config-test.json
#
# Example:
# ./my-files.py
#
# ./my-files.py --config config-test.json 11
#
# for testing - skips some files:
# ./my-files.py -t
#
# ./my-files.py -t --config config-test.json
#
# documentation about using xlsxwriter to insert images can be found at:
# <NAME>, "Example: Inserting images into a worksheet", web page, 10 November 2018, https://xlsxwriter.readthedocs.io/example_images.html
#
# <NAME>.
#
# based on earlier my-files.py
#
# 2019.01.17
#
import requests, time
import pprint
import optparse
import sys
import json
# Use Python Pandas to create XLSX files
import pandas as pd
#############################
###### EDIT THIS STUFF ######
#############################
global baseUrl # the base URL used for access to Canvas
global header # the header for all HTTP requests
global payload # place to store additional payload when needed for options to HTTP requests
# Based upon the options to the program, initialize the variables used to access Canvas via HTTP requests
def initialize(options):
global baseUrl, header, payload
# styled based upon https://martin-thoma.com/configuration-files-in-python/
if options.config_filename:
config_file=options.config_filename
else:
config_file='config.json'
try:
with open(config_file) as json_data_file:
configuration = json.load(json_data_file)
access_token=configuration["canvas"]["access_token"]
baseUrl="https://"+configuration["canvas"]["host"]+"/api/v1"
header = {'Authorization' : 'Bearer ' + access_token}
payload = {}
except:
print("Unable to open configuration file named {}".format(config_file))
print("Please create a suitable configuration file, the default name is config.json")
sys.exit()
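# Illustrative only: from the lookups in initialize() above, the configuration
# file is assumed to have roughly the shape below (host and token values are
# placeholders). Shown as a Python dict for reference; the real file is JSON.
EXAMPLE_CONFIG = {
    "canvas": {
        "access_token": "<your Canvas API access token>",
        "host": "canvas.example.com"
    }
}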
def list_files(course_id):
files_found_thus_far=[]
# Use the Canvas API to get the list of files for the course
#GET /api/v1/courses/:course_id/files
url = "{0}/courses/{1}/files".format(baseUrl, course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting files: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
files_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of files
# see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
if Verbose_Flag:
print("result of getting files for a paginated response: {}".format(r.text))
page_response = r.json()
for p_response in page_response:
files_found_thus_far.append(p_response)
return files_found_thus_far
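# Sketch only: the pagination loop above is repeated for files and folders, so a
# generic helper along these lines could factor it out. It relies on the same
# Link headers that requests exposes through r.links; the helper name and
# signature are assumptions, not part of the original script.
import requests

def get_all_pages(url, request_header):
    """Follow 'next' links until there are no more pages; return the combined JSON list."""
    results = []
    r = requests.get(url, headers=request_header)
    if r.status_code != requests.codes.ok:
        return results
    results.extend(r.json())
    while 'next' in r.links:  # requests parses the Link header into r.links
        r = requests.get(r.links['next']['url'], headers=request_header)
        results.extend(r.json())
    return results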
def list_folders(course_id):
folders_found_thus_far=[]
# Use the Canvas API to get the list of folders for the course
#GET /api/v1/courses/:course_id/folders
url = "{0}/courses/{1}/folders".format(baseUrl, course_id)
if Verbose_Flag:
print("url: {}".format(url))
r = requests.get(url, headers = header)
if Verbose_Flag:
print("result of getting folders: {}".format(r.text))
if r.status_code == requests.codes.ok:
page_response=r.json()
for p_response in page_response:
folders_found_thus_far.append(p_response)
# the following is needed when the response has been paginated
# i.e., when the response is split into pieces - each returning only some of the list of folders
# see "Handling Pagination" - Discussion created by <EMAIL> on Apr 27, 2015, https://community.canvaslms.com/thread/1500
while r.links['current']['url'] != r.links['last']['url']:
r = requests.get(r.links['next']['url'], headers=header)
if Verbose_Flag:
print("result of getting folders for a paginated response: {}".format(r.text))
page_response = r.json()
for p_response in page_response:
folders_found_thus_far.append(p_response)
return folders_found_thus_far
def main():
global Verbose_Flag
default_picture_size=128
parser = optparse.OptionParser()
parser.add_option('-v', '--verbose',
dest="verbose",
default=False,
action="store_true",
help="Print lots of output to stdout"
)
parser.add_option("--config", dest="config_filename",
help="read configuration from FILE", metavar="FILE")
parser.add_option('-t', '--testing',
dest="testing",
default=False,
action="store_true",
help="testing mode for skipping files for some courses"
)
options, remainder = parser.parse_args()
Verbose_Flag=options.verbose
if Verbose_Flag:
print("ARGV : {}".format(sys.argv[1:]))
print("VERBOSE : {}".format(options.verbose))
print("REMAINING : {}".format(remainder))
print("Configuration file : {}".format(options.config_filename))
initialize(options)
if (len(remainder) < 1):
print("Insuffient arguments\n must provide course_id\n")
return
course_id=remainder[0]
folders=list_folders(course_id)
folders_df=pd.json_normalize(folders)
output=list_files(course_id)
if output and len(output) > 0 and Verbose_Flag:
print("number of files in course is {}".format(len(output)))
# the following was inspired by the section "Using XlsxWriter with Pandas" on http://xlsxwriter.readthedocs.io/working_with_pandas.html
# set up the output writer
writer = pd.ExcelWriter('files-'+str(course_id)+'.xlsx', engine='xlsxwriter')
if (output):
# create a Panda dataframe from the output
df= | pd.json_normalize(output) | pandas.json_normalize |
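# Illustrative continuation (not part of the original snippet): once the files and
# folders have been normalized into DataFrames, the usual XlsxWriter-with-Pandas
# pattern is to write each frame to its own sheet and close the writer. The sheet
# names below are assumptions.
import pandas as pd

def write_course_workbook(course_id, files_df, folders_df):
    writer = pd.ExcelWriter('files-' + str(course_id) + '.xlsx', engine='xlsxwriter')
    files_df.to_excel(writer, sheet_name='Files')
    folders_df.to_excel(writer, sheet_name='Folders')
    writer.close()  # writes the workbook to disk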
import matplotlib.pyplot as plt
from matplotlib.ticker import FormatStrFormatter
hfont = {'fontname':'Helvetica'}
# plt.rcParams['figure.figsize']=(10,8)
# plt.rcParams['font.family'] = 'sans-serif'
# plt.rcParams['font.sans-serif'] = ['Tahoma', 'DejaVu Sans','Lucida Grande', 'Verdana']
plt.rcParams['font.size']=6
# plt.rcParams["figure.figsize"] = [12, 8]
plt.rcParams['image.cmap']='Purples'
# plt.rcParams['axes.linewidth']=0.8
import pandas as pd
import numpy as np
from sklearn.feature_selection import mutual_info_regression
from sklearn.ensemble import ExtraTreesRegressor,GradientBoostingRegressor
import os
root_path = os.path.dirname(os.path.abspath('__file__'))
graphs_path = root_path+'/graphs/'
import sys
sys.path.append(root_path)
h_modwt_1 = pd.read_csv(root_path+'/Huaxian_modwt/data-wddff/db1-4/single_hybrid_1_ahead_lag12_mi_ts0.1/cal_samples.csv')
h_modwt_3 = pd.read_csv(root_path+'/Huaxian_modwt/data-wddff/db1-4/single_hybrid_3_ahead_lag12_mi_ts0.1/cal_samples.csv')
h_modwt_5 = pd.read_csv(root_path+'/Huaxian_modwt/data-wddff/db1-4/single_hybrid_5_ahead_lag12_mi_ts0.1/cal_samples.csv')
h_modwt_7 = pd.read_csv(root_path+'/Huaxian_modwt/data-wddff/db1-4/single_hybrid_7_ahead_lag12_mi_ts0.1/cal_samples.csv')
h_dwt_1 = pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_1_ahead_forecast_pacf/train_samples.csv')
h_dwt_3 = pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_3_ahead_forecast_pacf/train_samples.csv')
h_dwt_5 = pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_5_ahead_forecast_pacf/train_samples.csv')
h_dwt_7 = pd.read_csv(root_path+'/Huaxian_dwt/data/db10-2/one_step_7_ahead_forecast_pacf/train_samples.csv')
h_vmd_1 = pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_1_ahead_forecast_pacf/train_samples.csv')
h_vmd_3 = pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_3_ahead_forecast_pacf/train_samples.csv')
h_vmd_5 = pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_5_ahead_forecast_pacf/train_samples.csv')
h_vmd_7 = pd.read_csv(root_path+'/Huaxian_vmd/data/one_step_7_ahead_forecast_pacf/train_samples.csv')
h_eemd_1 = pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_1_ahead_forecast_pacf/train_samples.csv')
h_eemd_3 = pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_3_ahead_forecast_pacf/train_samples.csv')
h_eemd_5 = pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_5_ahead_forecast_pacf/train_samples.csv')
h_eemd_7 = pd.read_csv(root_path+'/Huaxian_eemd/data/one_step_7_ahead_forecast_pacf/train_samples.csv')
h_ssa_1 = pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_1_ahead_forecast_pacf/train_samples.csv')
h_ssa_3 = pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_3_ahead_forecast_pacf/train_samples.csv')
h_ssa_5 = pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_5_ahead_forecast_pacf/train_samples.csv')
h_ssa_7 = | pd.read_csv(root_path+'/Huaxian_ssa/data/one_step_7_ahead_forecast_pacf/train_samples.csv') | pandas.read_csv |
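# Sketch only: the scikit-learn imports above suggest these calibration samples
# feed a feature-relevance analysis. A minimal mutual-information ranking could
# look like the helper below, assuming each *_samples.csv holds predictor columns
# plus a target column named 'Y' (that column name is an assumption, not taken
# from the files).
from sklearn.feature_selection import mutual_info_regression

def rank_predictors(samples, target_col='Y'):
    X = samples.drop(columns=[target_col])
    y = samples[target_col]
    mi = mutual_info_regression(X, y)
    return pd.Series(mi, index=X.columns).sort_values(ascending=False)

# hypothetical usage: rank_predictors(h_vmd_1)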
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Script to interpolate atmospheric data from UERRA: increase the temporal resolution and save wind speed, cosine and sine of the wind direction, pressure and pressure gradient
Created on Mon Apr 11 15:01:26 2022
@author: <NAME> (<EMAIL>)
"""
#%%
import pandas as pd
import time
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp1d
import datetime
#%% Load data
W = pd.read_csv('../data/uerra_1h_c.csv')
speed = np.array(W['wind_speed'])
angle = np.array(W['wind_direction'])
press = np.array(W['pressure'])
px = np.array(W['pressure'])
py = np.array(W['pressure'])
W.time = pd.to_datetime(W.time, format='%Y-%m-%d %H:%M:%S')
t = np.array((W.time - W.time.dt.floor('D')[0]).dt.total_seconds())
#%% Useful definitions
dt = 600 #dt for the new time series in seconds
gdr = np.pi/180 # useful to transform from degrees to radians
#%%
# Define interpolating functions
# For speed and pressure linear interpolation is used
# For the angle, the cosine of the angle is interpolated using the nearest neighbour.
# Then, the angle is recomputed and used to compute the sine
fs = interp1d(t,speed)
fp = interp1d(t,press)
fd = interp1d(t,np.cos(angle),kind='nearest')
#fpx = fp = interp1d(t,px)
#fpy = fp = interp1d(t,px)
# Perform the interpolation
t_i = np.arange(t[0],t[-1:],dt) # New time vector
speed_i = fs(t_i)
press_i = fp(t_i)
#px_i = fpx(t_i)
#py_i = fpy(t_i)
c_angle_i = fd(t_i)
angle_i = np.arccos(c_angle_i)
s_angle_i = np.sin(angle_i)
#%%
time = pd.to_datetime(t_i + W.time[0].timestamp()-3600, unit='s')
# %%
d = {'time': time, 'wind_speed': speed_i, 'sine_wind_angle': s_angle_i, 'cosine_wind_angle': c_angle_i, 'pressure': press_i}#, 'pressure_x_g': px_i, 'pressure_y_g': py_i}
df = | pd.DataFrame(data=d) | pandas.DataFrame |
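# Sketch of an alternative approach (not from the original script): interpolating
# both the cosine and the sine of the direction and renormalising avoids the
# wrap-around problem of interpolating the angle directly (e.g. 350 deg -> 10 deg)
# and keeps the sign of the sine, which cannot be recovered from arccos alone.
# It assumes the direction is given in degrees.
import numpy as np
from scipy.interpolate import interp1d

def interp_direction(t, angle_deg, t_new):
    a = np.deg2rad(angle_deg)
    fc = interp1d(t, np.cos(a))
    fs = interp1d(t, np.sin(a))
    c, s = fc(t_new), fs(t_new)
    norm = np.hypot(c, s)  # renormalise so that cos^2 + sin^2 = 1
    return c / norm, s / norm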
# Project: GBS Tool
# Author: <NAME>, <EMAIL>
# Date: October 24, 2017
# License: MIT License (see LICENSE file of this package for more information)
#Reads a dataframe and outputs a new dataframe with the specified sampling time interval.
#interval is a string with time units. (i.e. '30s' for 30 seconds, '1T' for 1 minute)
#If interval is greater than the interval within the dataframe, mean values are used to create a down-sampled dataframe
#DataClass, String -> DataClass
def fixDataInterval(data, interval):
''' data is a DataClass with a pandas dataframe with datetime index.
interval is the desired interval of data samples. If this is
less than what is available in the df a langevin estimator will be used to fill
in missing times. If the interval is greater than the original time interval
the mean of values within the new interval will be generated'''
import pandas as pd
import numpy as np
#integer, numeric, numeric, numeric -> numeric array
#uses the Langevin equation to estimate records based on provided mean (mu) and standard deviation and a start value
def getValues(records, start, sigma, timestep):
'''
Uses the Langevin equation to estimate records based on provided mean (mu) and standard deviation and a start value
:param records: [Array of Integers] the number of timesteps to estimate values for
:param start: [Array of numeric] the start value to initiate the estimator for a given record
:param sigma: [Array of numeric] the standard deviation to use in the estimator
:param timestep: [Array of integer] the timestep to use in the estimator
:return: [Array of timestamps], [Array of numeric] the timestamp and estimated values for each record
'''
# sigma is the standard deviation for 1000 samples at timestep interval
import numpy as np
#number of steps
n = (records / timestep) + 1
# time constant. This value was empirically determined to result in a mean value between
tau = records*.2
tau[tau<1] = 1
#renormalized variables
# sigma scaled takes into account the difference in STD for different number of samples of data. Given a STD for
# 1000 data samples (sigma) the mean STD that will be observed is sigmaScaled, based empirically on 1-second data
sigma_bis = 0.4 * sigma * np.sqrt(2.0/(900*timestep))
sqrtdt = np.sqrt(timestep)
# find the 95th percentile of number of steps
n95 = int(np.percentile(n,95))
# find where over the 95th percentile
idxOver95 = np.where(n > n95)[0]
#x is the array that will contain the new values
x = np.zeros(shape=(len(start), int(n95)))
# steps is an array of timesteps in seconds with length = max(records)
steps = np.arange(0, int(n95)*timestep, timestep)
# t is the numeric value of the dataframe timestamps
t = pd.to_timedelta(pd.Series(pd.to_datetime(start.index.values, unit='s'), index=start.index)).dt.total_seconds()
# intervals is the steps array repeated for every row of time
intervals = np.repeat(steps, len(t), axis=0)
# reshape the interval matrix so each row has every timestep
intervals_reshaped = intervals.reshape(len(steps), len(t))
tr = t.repeat(len(steps))
rs = tr.values.reshape(len(t), len(steps))
time_matrix = rs + intervals_reshaped.transpose()
# put all the times in a single array
#the starter value
x[:, 0] = start
# use the next step in the time series as the average value for the synthesized data. The values will asymptotically reach this value, resulting in a smooth transition.
mu = start.shift(-1)
mu.iloc[-1] = mu.iloc[-2]
#TODO replace for loops
for i in range(n95-1):
x[:, i + 1] = x[:, i] + timestep * (-(x[:, i] - mu) / tau) + np.multiply(sigma_bis.values * sqrtdt, np.random.randn(len(mu)))
# remove extra values to avoid improper mixing of values when sorting
for row in range(time_matrix.shape[0]):
time_matrix[row,int(n[row]):] = None
x[row,int(n[row]):] = None
timeArray = np.concatenate(time_matrix)
values = np.concatenate(x)
# this can only work as long as fixBadData removes any None values
values = values[values != None]
timeArray = timeArray[timeArray != None]
# individually calc the rest
for idx in idxOver95:
# find remaining values to be calculated
nRemaining = int(max([n[idx] - n95, 0]))
# calc remaining values
x0 = np.zeros(shape = (nRemaining,))
# first value is last value of array
x0[0] = x[idx, -1]
# corresponding time matrix
time_matrix0 = time_matrix[idx,-1] + np.arange(0,nRemaining*timestep,timestep)
for idx0 in range(1,nRemaining):
x0[idx0] = x0[idx0-1] + timestep * (-(x0[idx0-1] - mu[idx]) / tau[idx]) + np.multiply(sigma_bis.values[idx] * sqrtdt, np.random.randn())
# append to already calculated values
values = np.append(values, x0)
timeArray = np.append(timeArray,time_matrix0)
# TODO: sort values and timeArray by TimeArray.
tv = np.array(list(zip(timeArray,values)))
tv = tv[tv[:,0].argsort()] # sort by timeArray
t, v = zip(*tv)
return t,v
#dataframe -> integer array, integer array
#returns arrays of time as seconds and values estimated using the Langevin equation
#for all gaps of data within a dataframe
def estimateDistribution(df,interval,col):
import numpy as np
#feeders for the langevin estimate
mu = df[col+'_mu']
start = df[col]
sigma = df[col+'_sigma']
records = df['timediff'] / pd.to_timedelta(interval)
timestep = pd.Timedelta(interval).seconds
#handle memory error exceptions by working with smaller subsets
try:
#return an array of arrays of values
timeArray, values = getValues(records, start, sigma,timestep)
return timeArray, values
except MemoryError:
handleMemory()
return
#steps is an array of timesteps in seconds with length = max(records)
for idx in range(len(data.fixed)):
# df contains the non-upsampled records. Means and standard deviation come from non-upsampled data.
df = data.fixed[idx].copy()
# create a list of individual loads, total of power components and environmental measurements to fix interval on
fixColumns = []
fixColumns.extend(data.loads)
fixColumns.extend(data.eColumns)
#if there are power components fix intervals based on total power and scale to each component
if df['total_p'].first_valid_index() != None:
fixColumns.append('total_p')
print('before upsampling dataframe is: %s' %len(data.fixed[idx]))
# up or down sample to our desired interval
# down sampling results in averaged values
#this changes the size of data.fixed[idx], so it no longer matches df rowcount.
data.fixed[idx] = data.fixed[idx].resample(pd.to_timedelta(interval[0])).mean()
print('after upsampling dataframe is: %s' % len(data.fixed[idx]))
for col in fixColumns:
df0 = df[[col]].copy()
# remove rows that are nan for this column, except for the first and last, in order to keep the same first and last
# time stamps
df0 = pd.concat([df0.iloc[[0]], df0.iloc[1:-2].dropna(), df0.iloc[[-1]]])
# get first and last non-nan indices
idx0 = df0[col].first_valid_index()
if idx0 != None:
df0[col][0] = df0[col][idx0]
idx1 = df0[col].last_valid_index()
if idx1 != None:
df0[col][-1] = df0[col][idx1]
# time interval between consecutive records
df0['timediff'] = pd.Series(pd.to_datetime(df0.index, unit='s'), index=df0.index).diff(1).shift(-1)
df0['timediff'] = df0['timediff'].fillna(0)
# get the median number of steps in a 24 hr period.
steps1Day = int( | pd.to_timedelta(1, unit='d') | pandas.to_timedelta |
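# Minimal standalone sketch of the Langevin-style estimator described in the
# docstrings above: a discrete Ornstein-Uhlenbeck update that relaxes from a
# known start value toward a target mean while adding scaled Gaussian noise.
# The parameter handling (tau, noise scaling) is simplified relative to getValues().
import numpy as np

def langevin_fill(start, target, n_steps, dt, tau, sigma):
    """Synthesize n_steps values bridging two known samples."""
    x = np.empty(n_steps)
    x[0] = start
    sqrtdt = np.sqrt(dt)
    for i in range(1, n_steps):
        drift = -(x[i - 1] - target) / tau
        x[i] = x[i - 1] + dt * drift + sigma * sqrtdt * np.random.randn()
    return x

# hypothetical values: fill a 10-minute gap at 1-second resolution
# filled = langevin_fill(start=50.0, target=55.0, n_steps=600, dt=1.0, tau=120.0, sigma=0.4)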
import argparse
import pandas as pd
import numpy as np
from datetime import timedelta
def set_interval(df, interval, agg):
df_sampled = df.resample(interval).agg(agg)
period = {"H": 24, "30min": 48, "10min": 144, "min": 1440}
return df_sampled, period[interval]
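# Illustrative usage of set_interval; the toy frame below is an assumption made
# for the example (a minute-indexed counter aggregated to hourly means).
def _demo_set_interval():
    idx = pd.date_range("2020-04-01", periods=120, freq="min")
    toy = pd.DataFrame({"running_gpu_num": np.arange(120)}, index=idx)
    hourly, steps_per_day = set_interval(toy, "H", {"running_gpu_num": "mean"})
    return hourly, steps_per_day  # steps_per_day == 24 for an hourly resample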
"""Script for generating cluster sequence file"""
def parse_sequence(dir):
date_range = ('2020-04-01 00:00:00', '2020-09-28 23:59:00')
log = pd.read_csv(f'{dir}/cluster_log.csv',
parse_dates=['submit_time', 'start_time', 'end_time'])
cluster_gpu = pd.read_csv(
f'{dir}/cluster_gpu_number.csv', parse_dates=['date'])
df = pd.DataFrame(pd.date_range(
start=date_range[0], end=date_range[1], freq='T'), columns=['time'])
columns = ['running_gpujob_num', 'total_gpu_num', 'running_gpu_num',
'running_gpu_multi', 'running_gpu_single', 'gpu_utilization',
'gpu_utilization_multi', 'gpu_utilization_single']
df[columns] = 0
log = log[log['gpu_num'] > 0]
# parse each job
for _, job in log.iterrows():
gnum = job['gpu_num']
job['submit_time'] = job['submit_time'].replace(second=0)
job['start_time'] = job['start_time'].replace(second=0)
job['end_time'] = job['end_time'].replace(second=0)
run = (df['time'] >= job['start_time']) & (
df['time'] <= job['end_time'])
if gnum > 0:
df.loc[run, 'running_gpujob_num'] += 1
df.loc[run, 'running_gpu_num'] += gnum
if gnum > 1:
df.loc[run, 'running_gpu_multi'] += gnum
else:
df.loc[run, 'running_gpu_single'] += gnum
cluster_gpu = cluster_gpu[['date', 'total']]
cluster_gpu = cluster_gpu[(cluster_gpu['date'] >= pd.Timestamp(
date_range[0])) & (cluster_gpu['date'] <= pd.Timestamp(date_range[1]))]
cluster_gpu = pd.concat([cluster_gpu]*1440).sort_index()
df['total_gpu_num'] = cluster_gpu['total'].values
df['running_gpu_num'] = df['running_gpu_num'].combine(
df['total_gpu_num'], min)
df['gpu_utilization'] = (df['running_gpu_num'] /
df['total_gpu_num']).round(3)
df['gpu_utilization_multi'] = (df['running_gpu_multi'] /
df['total_gpu_num']).round(3)
df['gpu_utilization_single'] = (df['running_gpu_single'] /
df['total_gpu_num']).round(3)
df.set_index('time', inplace=True, drop=True)
return df
"""Script for generating cluster throughput file"""
def parse_throughput(dir):
date_range = ("2020-04-01 00:00:00", "2020-09-28 23:50:00")
log = pd.read_csv(
f"{dir}/cluster_log.csv", parse_dates=["submit_time", "start_time", "end_time"]
)
df = pd.DataFrame(
pd.date_range(start=date_range[0], end=date_range[1], freq="10T"),
columns=["time"],
)
df[
[
"submit_job_all",
"start_job_all",
"end_job_all",
"submit_gpu_job",
"start_gpu_job",
"end_gpu_job",
"submit_gpu_num",
"start_gpu_num",
"end_gpu_num",
]
] = 0
df.set_index("time", inplace=True, drop=True)
for i in range(len(df)):
for kind in ("submit", "start", "end"):
jobs = log[
(log[kind + "_time"] >= df.index[i])
& (log[kind + "_time"] < df.index[i] + timedelta(minutes=10))
]
df[kind + "_job_all"][i] = len(jobs)
df[kind + "_gpu_job"][i] = len(jobs[jobs["gpu_num"] != 0])
df[kind + "_gpu_num"][i] = jobs["gpu_num"].agg(sum)
return df
"""Script for generating cluster user file"""
def parse_user(dir, helios=False):
# df : contains cpu and gpu jobs
# dfgpu: contains only gpu jobs
# helios: analyze users across the whole data center
if helios:
df = pd.read_csv(
f"{dir}/all_cluster_log.csv",
parse_dates=["submit_time", "start_time", "end_time"],
)
else:
df = pd.read_csv(
f"{dir}/cluster_log.csv",
parse_dates=["submit_time", "start_time", "end_time"],
)
dfgpu = df[df["gpu_num"] > 0]
users = df["user"].unique()
users_gpu = dfgpu["user"].unique()
user_df = pd.DataFrame({"user": users})
user_df = user_df.set_index("user")
user_df["cpu_only"] = False
user_df[["vc_list", "vc_list_gpu"]] = None
for u in users:
data = df[df["user"] == u]
datagpu = dfgpu[dfgpu["user"] == u]
if u in users_gpu:
cpu_job_num = len(data) - len(datagpu)
plist = data["vc"].unique().tolist()
user_df.at[u, "vc_list"] = plist
user_df.at[u, "vc_num"] = len(plist)
glist = datagpu["vc"].unique().tolist()
user_df.at[u, "vc_list_gpu"] = glist
user_df.at[u, "vc_num_gpu"] = len(glist)
user_df.at[u, "job_num"] = len(data)
user_df.at[u, "gpu_job_num"] = len(datagpu)
user_df.at[u, "cpu_job_num"] = cpu_job_num
user_df.at[u, "avg_run_time"] = data["duration"].mean()
user_df.at[u, "avg_gpu_run_time"] = datagpu["duration"].mean()
user_df.at[u, "avg_cpu_run_time"] = (
0
if cpu_job_num == 0
else (data["duration"].sum() - datagpu["duration"].sum()) / cpu_job_num
)
user_df.at[u, "avg_pend_time"] = data["queue"].mean()
user_df.at[u, "avg_gpu_pend_time"] = datagpu["queue"].mean()
user_df.at[u, "avg_cpu_pend_time"] = (
0
if cpu_job_num == 0
else (data["queue"].sum() - datagpu["queue"].sum()) / cpu_job_num
)
user_df.at[u, "avg_gpu_num"] = data["gpu_num"].mean()
user_df.at[u, "avg_cpu_num"] = data["cpu_num"].mean()
user_df.at[u, "total_gpu_time"] = (
datagpu["gpu_num"] * datagpu["duration"]
).sum()
user_df.at[u, "total_cpu_time"] = (
data["cpu_num"] * data["duration"]).sum()
user_df.at[u, "total_cpu_only_time"] = (
user_df.at[u, "total_cpu_time"]
- (datagpu["cpu_num"] * datagpu["duration"]).sum()
)
user_df.at[u, "total_gpu_pend_time"] = datagpu["queue"].sum()
user_df.at[u, "completed_percent"] = len(
data[data["state"] == "COMPLETED"]
) / len(data)
user_df.at[u, "completed_gpu_percent"] = len(
datagpu[datagpu["state"] == "COMPLETED"]
) / len(datagpu)
user_df.at[u, "completed_cpu_percent"] = (
0
if cpu_job_num == 0
else (
len(data[data["state"] == "COMPLETED"])
- len(datagpu[datagpu["state"] == "COMPLETED"])
)
/ cpu_job_num
)
user_df.at[u, "cencelled_percent"] = len(
data[data["state"] == "CANCELLED"]
) / len(data)
user_df.at[u, "cencelled_gpu_percent"] = len(
datagpu[datagpu["state"] == "CANCELLED"]
) / len(datagpu)
user_df.at[u, "cencelled_cpu_percent"] = (
0
if cpu_job_num == 0
else (
len(data[data["state"] == "CANCELLED"])
- len(datagpu[datagpu["state"] == "CANCELLED"])
)
/ cpu_job_num
)
user_df.at[u, "failed_percent"] = len(
data[data["state"] == "FAILED"]
) / len(data)
user_df.at[u, "failed_gpu_percent"] = len(
datagpu[datagpu["state"] == "FAILED"]
) / len(datagpu)
user_df.at[u, "failed_cpu_percent"] = (
0
if cpu_job_num == 0
else (
len(data[data["state"] == "FAILED"])
- len(datagpu[datagpu["state"] == "FAILED"])
)
/ cpu_job_num
)
else:
user_df.at[u, "cpu_only"] = True
plist = data["vc"].unique().tolist()
user_df.at[u, "vc_list"] = plist
user_df.at[u, "vc_num"] = len(plist)
user_df.at[u, "vc_list_gpu"] = []
user_df.at[u, "vc_num_gpu"] = 0
user_df.at[u, "job_num"] = len(data)
user_df.at[u, "gpu_job_num"] = 0
user_df.at[u, "cpu_job_num"] = len(data)
user_df.at[u, "avg_run_time"] = data["duration"].mean()
user_df.at[u, "avg_gpu_run_time"] = 0
user_df.at[u, "avg_cpu_run_time"] = user_df.at[u, "avg_run_time"]
user_df.at[u, "avg_pend_time"] = data["queue"].mean()
user_df.at[u, "avg_gpu_pend_time"] = 0
user_df.at[u, "avg_cpu_pend_time"] = user_df.at[u, "avg_pend_time"]
user_df.at[u, "avg_gpu_num"] = 0
user_df.at[u, "avg_cpu_num"] = data["cpu_num"].mean()
user_df.at[u, "total_gpu_time"] = 0
user_df.at[u, "total_cpu_time"] = (
data["cpu_num"] * data["duration"]).sum()
user_df.at[u, "total_cpu_only_time"] = user_df.at[u,
"total_cpu_time"]
user_df.at[u, "total_gpu_pend_time"] = 0
user_df.at[u, "completed_percent"] = len(
data[data["state"] == "COMPLETED"]
) / len(data)
user_df.at[u, "completed_gpu_percent"] = 0
user_df.at[u, "completed_cpu_percent"] = user_df.at[u,
"completed_percent"]
user_df.at[u, "cencelled_percent"] = len(
data[data["state"] == "CANCELLED"]
) / len(data)
user_df.at[u, "cencelled_gpu_percent"] = 0
user_df.at[u, "cencelled_cpu_percent"] = user_df.at[u,
"cencelled_percent"]
user_df.at[u, "failed_percent"] = len(
data[data["state"] == "FAILED"]
) / len(data)
user_df.at[u, "failed_gpu_percent"] = 0
user_df.at[u, "failed_cpu_percent"] = user_df.at[u,
"failed_percent"]
user_df.sort_values(by="job_num", ascending=False, inplace=True)
user_df[
["vc_num", "vc_num_gpu", "job_num", "gpu_job_num", "cpu_job_num"]
] = user_df[
["vc_num", "vc_num_gpu", "job_num", "gpu_job_num", "cpu_job_num"]
].astype(
int
)
return user_df
"""Script for generating a all cluster trace file"""
def parse_all_cluster_log(clusters):
logall = pd.DataFrame()
for cluster in clusters:
print(f'Parsing {cluster}')
data_dir = f'../data/{cluster}'
log = pd.read_csv(f'{data_dir}/cluster_log.csv',
parse_dates=['submit_time', 'start_time', 'end_time'])
log['c'] = cluster
logall = pd.concat([logall, log])
logall.insert(1, 'cluster', logall['c'])
logall.drop(columns='c', inplace=True)
logall.to_csv('../data/all_cluster_log.csv', index=False)
"""Script for generating all cluster monthly file"""
def parse_monthly_job(cluster_name):
df = pd.DataFrame(cluster_name, columns=['id'])
df.set_index('id', drop=True, inplace=True)
for cluster in cluster_name:
data_dir = f'../data/{cluster}'
log = pd.read_csv(f'{data_dir}/cluster_log.csv',
parse_dates=['submit_time', 'start_time', 'end_time'])
log['month'] = getattr(log['submit_time'].dt, 'month').astype(np.int16)
glog = log[log['gpu_num'] > 0]
clog = log[log['gpu_num'] == 0]
for m in range(4, 10):
month_glog = glog[glog['month'] == m]
df.at[cluster, str(m) + ' Job Num'] = len(log[log['month'] == m])
df.at[cluster, str(
m) + ' CPU Job Num'] = len(clog[clog['month'] == m])
df.at[cluster, str(m) + ' GPU Job Num'] = len(month_glog)
df.at[cluster, str(
m) + ' GJobNum 1'] = len(month_glog[month_glog['gpu_num'] == 1])
df.at[cluster, str(
m) + ' GJobNum g1'] = len(month_glog[month_glog['gpu_num'] > 1])
df = df.astype(int)
df.to_csv('../data/all_cluster_monthly_job_num.csv')
def parse_monthly_util(cluster_list):
df = | pd.DataFrame() | pandas.DataFrame |
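# Illustrative entry point (not part of the original script): one way the parse_*
# helpers above might be driven from the command line. The flag names and output
# file names are assumptions made for this example.
def _example_main():
    parser = argparse.ArgumentParser(description="Parse cluster trace files")
    parser.add_argument("cluster", help="cluster name, e.g. the name of a data directory")
    args = parser.parse_args()
    data_dir = f"../data/{args.cluster}"
    parse_sequence(data_dir).to_csv(f"{data_dir}/cluster_sequence.csv")
    parse_throughput(data_dir).to_csv(f"{data_dir}/cluster_throughput.csv")
    parse_user(data_dir).to_csv(f"{data_dir}/cluster_user.csv")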
"""Pre-process accessibility-based provincial OD matrix
Purpose
-------
Create province scale OD matrices between roads connecting villages to nearest communes:
- Net revenue estimates of commune villages
- IFPRI crop data at 1km resolution
Input data requirements
-----------------------
1. Correct paths to all files and correct input parameters
2. Geotiff files with IFPRI crop data:
- tons - Float values of production tonnage at each grid cell
- geometry - Raster grid cell geometry
3. Shapefile of RiceAtlas data:
- month production columns - tonnage of rice for each month
- geometry - Shapely Polygon geometry of Provinces
4. Shapefile of Provinces
- od_id - Integer Province ID corresponding to OD ID
- name_eng - String name of Province in English
- geometry - Shapely Polygon geometry of Provinces
5. Shapefile of Communes
- population - Float values of populations in Communes
- nfrims - Float values of number of firms in Provinces
- netrevenue - Float values of Net Revenue in Provinces
- argi_prop - Float values of proportion of agrivculture firms in Provinces
- geometry - Shapely Polygon geometry of Communes
6. Shapefiles of network nodes
- node_id - String node ID
- geometry - Shapely point geometry of nodes
7. Shapefiles of network edges
- vehicle_co - Count of vehiles only for roads
- geometry - Shapely LineString geometry of edges
8. Shapefiles of Commune center points
- object_id - Integer ID of point
- geometry - Shapely point geometry of points
9. Shapefiles of Village center points
- object_id - Integer ID of points
- geometry - Shapely point geometry of points
Results
-------
1. Excel workbook with sheet of province-wise OD flows
- origin - String node ID of origin node
- destination - String node ID of destination node
- crop_names - Float values of daily tonnages of IFPRI crops (except rice) between OD nodes
- min_rice - Float values of minimum daily tonnages of rice between OD nodes
- max_rice - Float values of maximum daily tonnages of rice between OD nodes
- min_croptons - Float values of minimum daily tonnages of crops between OD nodes
- max_croptons - Float values of maximum daily tonnages of crops between OD nodes
- min_agrirev - Float value of Minimum daily revenue of agriculture firms between OD nodes
- max_agrirev - Float value of Maximum daily revenue of agriculture firms between OD nodes
- min_noagrirev - Float value of Minimum daily revenue of non-agriculture firms between OD nodes
- max_noagrirev - Float value of Maximum daily revenue of non-agriculture firms between OD nodes
- min_netrev - Float value of Minimum daily revenue of all firms between OD nodes
- max_netrev - Float value of Maximum daily revenue of all firms between OD nodes
References
----------
1. <NAME>., <NAME>., <NAME>., <NAME>. & <NAME>. (2018).
Analysis and development of model for addressing climate change/disaster risks in multi-modal transport networks in Vietnam.
Final Report, Oxford Infrastructure Analytics Ltd., Oxford, UK.
2. All input data folders and files referred to in the code below.
"""
import os
import subprocess
import sys
import geopandas as gpd
import igraph as ig
import numpy as np
import pandas as pd
from shapely.geometry import Point
from vtra.utils import *
def netrev_od_pairs(start_points, end_points):
"""Assign crop tonnages to OD pairs
Parameters
- start_points - GeoDataFrame of start points for Origins
- end_points - GeoDataFrame of potential end points for Destinations
Outputs
od_pairs_df - Pandas DataFrame with columns:
- origin - Origin node ID
- destination - Destination node ID
- netrev_agri - Net revenue of agriculture firms
- netrev_noagri - Net revenue of non-agriculture firms
"""
save_paths = []
for iter_, place in start_points.iterrows():
try:
closest_center = end_points.loc[end_points['OBJECTID']
== place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
save_paths.append(
(closest_center, place['NEAREST_G_NODE'], place['netrev_agri'], place['netrev_noagri']))
except:
print(iter_)
od_pairs_df = pd.DataFrame(
save_paths, columns=['origin', 'destination', 'netrev_agri', 'netrev_noagri'])
od_pairs_df = od_pairs_df.groupby(['origin', 'destination'])[
['netrev_agri', 'netrev_noagri']].sum().reset_index()
return od_pairs_df
def crop_od_pairs(start_points, end_points, crop_name):
"""Assign crop tonnages to OD pairs
Parameters
- start_points - GeoDataFrame of start points for Origins
- end_points - GeoDataFrame of potential end points for Destinations
- crop_name - String name of crop
Outputs
od_pairs_df - Pandas DataFrame with columns:
- origin - Origin node ID
- destination - Destination node ID
- crop - Tonnage values for the named crop
"""
save_paths = []
for iter_, place in start_points.iterrows():
try:
closest_center = end_points.loc[end_points['OBJECTID']
== place['NEAREST_C_CENTER']]['NEAREST_G_NODE'].values[0]
save_paths.append((closest_center, place['NEAREST_G_NODE'], place['tons']))
except:
print(iter_)
od_pairs_df = pd.DataFrame(save_paths, columns=['origin', 'destination', crop_name])
od_pairs_df = od_pairs_df.groupby(['origin', 'destination'])[crop_name].sum().reset_index()
return od_pairs_df
def assign_monthly_tons_crops(x,rice_prod_file,crop_month_fields,province,x_cols):
"""Assign crop tonnages to OD pairs
Parameters
- x - Pandas DataFrame of values
- rice_prod_file - Shapefile of RiceAtlas monthly production value
- crop_month_fields - List of strings of month columns in Rice Atlas shapefile
- province - String name of province
- x_cols - List of string names of crops
Outputs
- min_croptons - Float value of Minimum daily tonnages of crops
- max_croptons - Float value of Maximum daily tonnages of crops
"""
# find the crop production months for the province
rice_prod_months = gpd.read_file(rice_prod_file)
rice_prod_months = rice_prod_months.loc[rice_prod_months.SUB_REGION == province]
rice_prod_months = rice_prod_months[crop_month_fields].values.tolist()
rice_prod_months = np.array(rice_prod_months[0])/sum(rice_prod_months[0])
rice_prod_months = rice_prod_months[rice_prod_months > 0]
rice_prod_months = rice_prod_months.tolist()
min_croptons = 0
max_croptons = 0
for x_name in x_cols:
if x_name == 'rice':
min_croptons += (1.0*min(rice_prod_months)*x[x_name])/30.0
max_croptons += (1.0*max(rice_prod_months)*x[x_name])/30.0
else:
min_croptons += (1.0*x[x_name])/365.0
max_croptons += (1.0*x[x_name])/365.0
return min_croptons, max_croptons
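# Illustrative pattern (placeholder data): functions like assign_monthly_tons_crops
# return a (min, max) tuple per row, so the usual way to attach both results is a
# row-wise apply unpacked with zip(*...). The toy frame below only demonstrates the
# unpacking pattern; it does not read the RiceAtlas shapefile.
def _demo_tuple_apply():
    toy = pd.DataFrame({'rice': [300.0, 600.0], 'maiz': [120.0, 90.0]})
    toy['min_tons'], toy['max_tons'] = zip(*toy.apply(
        lambda row: (row.min() / 365.0, row.max() / 30.0), axis=1))
    return toy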
def assign_io_rev_costs_crops(x, cost_dataframe,rice_prod_file,crop_month_fields,province, x_cols, ex_rate):
"""Assign crop tonnages to daily net revenues
Parameters
- x - Pandas DataFrame of values
- cost_dataframe - Pandas DataFrame of conversion of tonnages to net revenues
- rice_prod_file - Shapefile of RiceAtlas monthly production value
- province - String name of province
- x_cols - List of string names of crops
- ex_rate - Exchange rate from VND millions to USD
Outputs
- min_croprev - Float value of Minimum daily revenue of crops
- max_croprev - Float value of Maximum daily revenue of crops
"""
# find the crop production months for the province
rice_prod_months = gpd.read_file(rice_prod_file)
rice_prod_months = rice_prod_months.loc[rice_prod_months.SUB_REGION == province]
rice_prod_months = rice_prod_months[crop_month_fields].values.tolist()
rice_prod_months = np.array(rice_prod_months[0])/sum(rice_prod_months[0])
rice_prod_months = rice_prod_months[rice_prod_months > 0]
rice_prod_months = rice_prod_months.tolist()
min_croprev = 0
max_croprev = 0
cost_list = list(cost_dataframe.itertuples(index=False))
for cost_param in cost_list:
if cost_param.crop_code in x_cols:
if cost_param.crop_code == 'rice':
min_croprev += (1.0*min(rice_prod_months)*ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))/30.0
max_croprev += (1.0*max(rice_prod_months)*ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))/30.0
else:
min_croprev += 1.0/365.0 * \
(ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))
max_croprev += 1.0/365.0 * \
(ex_rate*cost_param.est_net_rev *
(x[cost_param.crop_code]/cost_param.tot_tons))
return min_croprev, max_croprev
def netrevenue_values_to_province_od_nodes(province_ods_df,prov_communes,commune_sindex,netrevenue,
n_firms,agri_prop,prov_pop,prov_pop_sindex,nodes,sindex_nodes,prov_commune_center,
sindex_commune_center,node_id,object_id,exchange_rate):
"""Assign commune level netrevenue values to OD nodes in provinces
- Based on finding nearest nodes to village points with netrevenues as Origins
- And finding nearest commune centers as Destinations
Parameters
- province_ods_df - List of lists of Pandas dataframes
- prov_communes - GeoDataFrame of commune level statistics
- commune_sindex - Spatial index of communes
- netrevenue - String name of column for netrevenue of communes in VND millions
- nfirm - String name of column for number of firms in communes
- agri_prop - String name of column for proportion of agriculture firms in communes
- prov_pop - GeoDataFrame of population points in Province
- prov_pop_sindex - Spatial index of population points in Province
- nodes - GeoDataFrame of province road nodes
- sindex_nodes - Spatial index of province road nodes
- prov_commune_center - GeoDataFrame of province commune center points
- sindex_commune_center - Spatial index of commune center points
- node_id - String name of Node ID column
- object_id - String name of commune ID column
- exchange_rate - Float value for exchange rate from VND million to USD
Outputs
province_ods_df - List of Lists of Pandas dataframes with columns:
- origin - Origin node ID
- destination - Destination node ID
- netrev_agri - Net revenue of agriculture firms
- netrev_noagri - Net revenue of non-agriculture firms
"""
# create new column in prov_communes with the number of villages
prov_communes['n_villages'] = prov_communes.geometry.apply(
lambda x: count_points_in_polygon(x, prov_pop_sindex))
prov_communes['netrev_village'] = exchange_rate * \
(prov_communes[netrevenue]*prov_communes[n_firms])/prov_communes['n_villages']
# also get the net revenue of the agriculture sector which is called nongnghiep
prov_communes['netrev_village_agri'] = 1.0/365.0 * \
(prov_communes[agri_prop]*prov_communes['netrev_village'])
prov_communes['netrev_village_noagri'] = 1.0/365.0 * \
(prov_communes['netrev_village'] - prov_communes['netrev_village_agri'])
# give each village a net revenue based on average per village in commune
prov_pop['netrev_agri'] = prov_pop.geometry.apply(lambda x: extract_value_from_gdf(
x, commune_sindex, prov_communes, 'netrev_village_agri'))
prov_pop['netrev_noagri'] = prov_pop.geometry.apply(lambda x: extract_value_from_gdf(
x, commune_sindex, prov_communes, 'netrev_village_noagri'))
# get nearest node in network for all start and end points
prov_pop['NEAREST_G_NODE'] = prov_pop.geometry.apply(
lambda x: get_nearest_node(x, sindex_nodes, nodes, node_id))
prov_pop['NEAREST_C_CENTER'] = prov_pop.geometry.apply(
lambda x: get_nearest_node(x, sindex_commune_center, prov_commune_center, object_id))
# find all OD pairs of the revenues
netrev_ods = netrev_od_pairs(prov_pop, prov_commune_center)
province_ods_df.append(netrev_ods)
return province_ods_df
def crop_values_to_province_od_nodes(province_ods_df,province_geom,calc_path,
crop_data_path,crop_names,nodes,sindex_nodes,prov_commune_center,sindex_commune_center,node_id,object_id):
"""Assign IFPRI crop values to OD nodes in provinces
- Based on finding nearest nodes to crop production sites as Origins
- And finding nearest commune centers as Destinations
Parameters
- province_ods_df - List of lists of Pandas dataframes
- province_geom - Shapely Geometry of province
- calc_path - Path to store intermediary calculations
- crop_data_path - Path to crop datasets
- crop_names - List of string of crop names in IFPRI datasets
- nodes - GeoDataFrame of province road nodes
- sindex_nodes - Spatial index of province road nodes
- prov_commune_center - GeoDataFrame of province commune center points
- sindex_commune_center - Spatial index of commune center points
- node_id - String name of Node ID column
- object_id - String name of commune ID column
Outputs
province_ods_df - List of Lists of Pandas dataframes with columns:
- origin - Origin node ID
- destination - Destination node ID
- crop - Tonnage values for the named crop
"""
# all the crop OD pairs
for file in os.listdir(crop_data_path):
if file.endswith(".tif") and 'spam_p' in file.lower().strip():
fpath = os.path.join(crop_data_path, file)
crop_name = [cr for cr in crop_names if cr in file.lower().strip()][0]
outCSVName = os.path.join(calc_path, 'crop_concentrations.csv')
subprocess.run(["gdal2xyz.py", '-csv', fpath, outCSVName])
# Load points and convert to geodataframe with coordinates
load_points = pd.read_csv(outCSVName, header=None, names=[
'x', 'y', 'tons'], index_col=None)
load_points = load_points[load_points['tons'] > 0]
geometry = [Point(xy) for xy in zip(load_points.x, load_points.y)]
load_points = load_points.drop(['x', 'y'], axis=1)
crop_points = gpd.GeoDataFrame(load_points, crs={'init': 'epsg:4326'}, geometry=geometry)
del load_points
# clip all to province
prov_crop = gdf_geom_clip(crop_points, province_geom)
if len(prov_crop.index) > 0:
prov_crop_sindex = prov_crop.sindex
prov_crop['NEAREST_G_NODE'] = prov_crop.geometry.apply(
lambda x: get_nearest_node(x, sindex_nodes, nodes, node_id))
prov_crop['NEAREST_C_CENTER'] = prov_crop.geometry.apply(
lambda x: get_nearest_node(x, sindex_commune_center, prov_commune_center, object_id))
crop_ods = crop_od_pairs(prov_crop, prov_commune_center, crop_name)
province_ods_df.append(crop_ods)
return province_ods_df
def main():
"""Pre-process provincial-scale OD
1. Specify the paths from where to read and write:
- Input data
- Intermediate calculations data
- Output results
2. Supply input data and parameters
- Names of the Provinces: List of strings
- Exchange rate to convert 2012 Net revenue in million VND values to USD in 2016
- Names of crops in IFPRI crop data
- Names of months in Rice Atlas data
- Name of column for netrevenue of communes in VND millions
- Name of column for number of firms in communes
- Name of column for proportion of agriculture firms in communes
- Name of Node ID column
- Name of commune ID column
3. Give the paths to the input data files:
- Network nodes files
- IFPRI crop data files
- Rice Atlas data shapefile
- Province boundary and stats data shapefile
- Commune boundary and stats data shapefile
- Population points shapefile for locations of villages
- Commune center points shapefile
"""
data_path, calc_path, output_path = load_config()['paths']['data'], load_config()[
'paths']['calc'], load_config()['paths']['output']
# Supply input data and parameters
province_list = ['Lao Cai', 'Binh Dinh', 'Thanh Hoa']
exchange_rate = 1.05*(1000000/21000)
crop_names = ['rice', 'cash', 'cass', 'teas',
'maiz', 'rubb', 'swpo', 'acof', 'rcof', 'pepp']
crop_month_fields = ['P_Jan', 'P_Feb', 'P_Mar', 'P_Apr', 'P_May',
'P_Jun', 'P_Jul', 'P_Aug', 'P_Sep', 'P_Oct', 'P_Nov', 'P_Dec']
netrevenue = 'netrevenue'
n_firms = 'nfirm'
agri_prop = 'nongnghiep'
node_id = 'NODE_ID'
object_id = 'OBJECTID'
# Give the paths to the input data files
network_data_path = os.path.join(data_path,'post_processed_networks')
crop_data_path = os.path.join(data_path, 'Agriculture_crops', 'crop_data')
rice_month_file = os.path.join(data_path, 'rice_atlas_vietnam', 'rice_production.shp')
province_path = os.path.join(data_path, 'Vietnam_boundaries',
'boundaries_stats', 'province_level_stats.shp')
commune_path = os.path.join(data_path, 'Vietnam_boundaries',
'boundaries_stats', 'commune_level_stats.shp')
population_points_in = os.path.join(
data_path, 'Points_of_interest', 'population_points.shp')
commune_center_in = os.path.join(
data_path, 'Points_of_interest', 'commune_committees_points.shp')
# Specify the output files and paths to be created
output_dir = os.path.join(output_path, 'flow_ods')
if os.path.exists(output_dir) == False:
os.mkdir(output_dir)
flow_output_excel = os.path.join(
output_dir, 'province_roads_commune_center_flow_ods.xlsx')
excl_wrtr = | pd.ExcelWriter(flow_output_excel) | pandas.ExcelWriter |
#--------------------------------------------------------------- Imports
from dotenv import load_dotenv
import alpaca_trade_api as tradeapi
import os
from pathlib import Path
import string
import pandas as pd
import numpy as np
import seaborn as sns
import panel as pn
from panel.interact import interact, interactive, fixed, interact_manual
from panel import widgets
import plotly.graph_objects as go
from plotly.subplots import make_subplots
import plotly.express as px
pn.extension('plotly')
from pytrends.request import TrendReq
#--------------------------------------------------------------- Environment
# Loads .env
load_dotenv()
# Sets Alpaca API key and secret
alpaca_key = os.getenv('ALPACA_API_KEY')
alpaca_secret = os.getenv('ALPACA_API_SECRET')
# Creates the Alpaca API object
alpaca = tradeapi.REST(alpaca_key, alpaca_secret, api_version = "v2")
timeframe = "1D"
start = pd.Timestamp('2016-05-26', tz = 'US/Pacific').isoformat()
end = pd.Timestamp('2021-06-6', tz = 'US/Pacific').isoformat()
#--------------------------------------------------------------- Global Variables
pytrend = TrendReq()
sectors = [
'Communications',
'Consumer Discretionary',
'Consumer Staples',
'Energy',
'Financial',
'Healthcare',
'Industrial',
'Information Technology',
'Materials',
'Real Estate',
'Utilities'
]
beta = ['Min', 'Max', 'Median', 'Mutual Fund']
z_field = ['Close', 'Volume']
sector_tickers = {
'Communications':
{'Min': 'VZ', 'Max': 'LYV', 'Median': 'TMUS', 'Mutual Fund': 'VOX'},
'Consumer Discretionary':
{'Min': 'NVR', 'Max': 'F', 'Median': 'HLT', 'Mutual Fund': 'VCR'},
'Consumer Staples':
{'Min': 'CLX', 'Max': 'SYY', 'Median': 'PM', 'Mutual Fund': 'VDC'},
'Energy':
{'Min': 'COG', 'Max': 'OXY', 'Median': 'SLB', 'Mutual Fund': 'VDE'},
'Financial':
{'Min': 'CBOE', 'Max': 'LNC', 'Median': 'BAC', 'Mutual Fund': 'VFH'},
'Healthcare':
{'Min': 'DGX', 'Max': 'ALGN', 'Median': 'CAH', 'Mutual Fund': 'VHT'},
'Industrial':
{'Min': 'DGX', 'Max': 'TDG', 'Median': 'DE', 'Mutual Fund': 'VIS'},
'Information Technology':
{'Min': 'ORCL', 'Max': 'ENPH', 'Median': 'NTAP', 'Mutual Fund': 'VGT'},
'Materials':
{'Min': 'NEM', 'Max': 'FCX', 'Median': 'AVY', 'Mutual Fund': 'VAW'},
'Real Estate':
{'Min': 'PSA', 'Max': 'SPG', 'Median': 'UDR', 'Mutual Fund': 'VNQ'},
'Utilities':
{'Min': 'ED', 'Max': 'AES', 'Median': 'SRE', 'Mutual Fund': 'VPU'}
}
member_picks = {
'Boomer': ['VDC', 'VNQ', 'VOX', 'VAW'],
'Stonks': ['GME', 'AMC', 'PSLV', 'BB'],
'Pro Gamer': ['AAPL', 'TSLA', 'AMC', 'WMT'],
'Real American': ['LMT', 'TAP', 'PM', 'HAL']
}
#--------------------------------------------------------------- Functions
# Generates Correlation Heatmap of Sector Mutual Funds & Index
def df_to_plotly(df):
return {'z': df.values.tolist(),
'x': df.columns.tolist(),
'y': df.index.tolist()}
@interact(Beta = beta)
def heatmap(Beta):
df = pd.DataFrame()
sp_file = Path('../Data/SP500.csv')
sp_df = pd.read_csv(sp_file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df['SP500'] = sp_df['Close']
for k, v in sector_tickers.items():
ticker = sector_tickers[k][Beta]
file = Path('../Data/{}.csv'.format(ticker))
ticker_df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df[k] = ticker_df['Close']
df = df.pct_change()
df.dropna(inplace = True)
corr = df.corr()
fig = go.Figure(data=go.Heatmap(
df_to_plotly(corr),
colorscale='blues'))
fig.update_layout(title = 'Heatmap',width=1000, height=500)
return fig
# Generates Candlestick Chart of Sector Ticker
@interact(Sector = sectors, Beta = beta)
def candlestick(Sector, Beta):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
fig = go.Figure(data=[go.Candlestick(
x = df.index,
open = df['Open'],
high = df['High'],
low = df['Low'],
close = df['Close']
)])
fig.update_layout(title = ticker, width=1000, height=500)
return fig
# Generates Comparison Line Graph of Sector Ticker & SPY
@interact(Sector = sectors, Beta = beta)
def v_spy(Sector, Beta):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
spy = pd.read_csv('../Data/SPY.csv', infer_datetime_format=True, parse_dates=True, index_col='Date')
fig = make_subplots()
trace1 = go.Scatter(
x = df.index,
y = df['Close'],
mode = 'lines',
name = ticker)
trace2 = go.Scatter(
x = spy.index,
y = spy['Close'],
mode = 'lines',
name = 'SPY')
fig.add_trace(trace1)
fig.add_trace(trace2)
fig.update_yaxes(range=[0, 700])
fig.update_layout(title = ticker + " versus SPY", width=1000, height=500)
return fig
# Generates Comparison Line Graph of Sector Ticker and its Google Search Interest
@interact(Sector = sectors, Beta = beta, Column = z_field)
def trend(Sector, Beta, Column):
ticker = sector_tickers[Sector][Beta]
file = Path('../Data/{}.csv'.format(ticker))
pytrend.build_payload(kw_list=[ticker], timeframe='today 5-y')
trend_df = pytrend.interest_over_time().rename_axis('Date')
trend_df.index = pd.to_datetime(trend_df.index)
df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
overlay = pd.merge(trend_df, df[Column], how = 'outer', left_index = True, right_index=True)
overlay = overlay.loc['2020-06-05':]
overlay.fillna(method = 'ffill', inplace = True)
fig = make_subplots(specs=[[{"secondary_y": True}]])
trace1 = go.Scatter(
x = overlay.index,
y = overlay[Column],
mode = 'lines',
name = Column)
trace2 = go.Scatter(
x = overlay.index,
y = overlay[ticker],
mode = 'lines',
name = 'Search Interest')
fig.add_trace(trace1, secondary_y=False)
fig.add_trace(trace2, secondary_y=True)
fig.update_yaxes(range=[overlay[Column].min()-(overlay[Column].std()*.2),overlay[Column].max()+(overlay[Column].std()*.2)], secondary_y=False)
fig.update_yaxes(range=[0,100], secondary_y=True)
fig.update_layout(title = ticker + " Closing Price vs Search Interest", width=1000, height=500)
return fig
# Builds Portfolio from 3 Tickers via Alpaca API, Displays Returns
@interact(Stock_1 = 'GOOG', Amount_1 = (0, 10000), Stock_2 = 'MSFT', Amount_2 = (0, 10000), Stock_3 = 'GME', Amount_3 = (0, 10000))
def api_call(Stock_1, Amount_1, Stock_2, Amount_2, Stock_3, Amount_3):
x1 = Stock_1.upper()
x2 = Stock_2.upper()
x3 = Stock_3.upper()
tickers = [x1, x2, x3]
df = alpaca.get_barset(
tickers,
timeframe,
start = start,
end = end,
limit = 1000
).df
close = pd.DataFrame()
close[x1] = df[x1]['close']
close[x2] = df[x2]['close']
close[x3] = df[x3]['close']
portfolio_df = pd.DataFrame()
shares1 = Amount_1 / df[x1]['close'][0]
shares2 = Amount_2 / df[x2]['close'][0]
shares3 = Amount_3 / df[x3]['close'][0]
portfolio_df[x1] = df[x1]['close'] * shares1
portfolio_df[x2] = df[x2]['close'] * shares2
portfolio_df[x3] = df[x3]['close'] * shares3
fig = px.line(portfolio_df, width=1000, height=500)
return fig
# Generates a Boxplot Showing Risk for Each Sector
@interact(Beta = beta)
def boxplot(Beta):
df = pd.DataFrame()
sp_file = Path('../Data/SP500.csv')
sp_df = pd.read_csv(sp_file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df['SP500'] = sp_df['Close']
for k, v in sector_tickers.items():
ticker = sector_tickers[k][Beta]
file = Path('../Data/{}.csv'.format(ticker))
ticker_df = pd.read_csv(file, infer_datetime_format=True, parse_dates=True, index_col='Date')
df[k] = ticker_df['Close']
df = df.pct_change()
df.dropna(inplace = True)
fig = px.box(df, width=1000, height=500)
return fig
# Generates Bar Graph Displaying Sharpe Ratios for Portfolios Designed by Group Members
@interact(Portfolio = member_picks)
def sharpe(Portfolio):
df = alpaca.get_barset(
Portfolio, timeframe, start = start, end = end, limit = 504).df
df.dropna(inplace = True)
close = | pd.DataFrame() | pandas.DataFrame |
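# Illustrative sketch (not the original function body): a Sharpe ratio is commonly
# computed from daily returns as mean/std scaled by sqrt(252) trading days, with
# the risk-free rate assumed to be zero here.
import numpy as np

def annualized_sharpe(close_prices):
    daily_returns = close_prices.pct_change().dropna()
    return (daily_returns.mean() / daily_returns.std()) * np.sqrt(252)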
# -*- coding: utf-8 -*-
# run in py3 !!
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "1";
import tensorflow as tf
config = tf.ConfigProto()
# config.gpu_options.per_process_gpu_memory_fraction=0.5
config.gpu_options.allow_growth = True
tf.Session(config=config)
import numpy as np
from sklearn import preprocessing
import tensorflow as tf
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
import pandas as pd
from keras import backend as K
import keras.layers.convolutional as conv
from keras.layers import merge
from keras.wrappers.scikit_learn import KerasRegressor
from keras import utils
from keras.layers.pooling import MaxPooling1D, MaxPooling2D
from keras.layers import pooling
from keras.models import Sequential, Model
from keras.regularizers import l1, l2
from keras import layers
from keras.layers import Dense, Dropout, Activation, Flatten, Input, Convolution1D, Convolution2D, LSTM
from keras.optimizers import SGD, RMSprop
from keras.layers.normalization import BatchNormalization
from keras import initializers
from keras.callbacks import EarlyStopping
from keras import callbacks
from keras import backend as K
from keras.utils import to_categorical
from keras.callbacks import EarlyStopping, ModelCheckpoint, Callback
from keras.models import Model
from keras import initializers, layers
from keras.optimizers import SGD, Adadelta, Adam
from keras.regularizers import l1, l2
from keras import regularizers
import sys
sys.path.append('.')
from hist_figure import his_figures
if len(sys.argv) > 1:
prefix = sys.argv[1]
else:
prefix = time.time()
DATAPATH = '5fold/'
RESULT_PATH = './results/'
feature_num = 25
batch_num = 2
# batch_size = 32
batch_size = 512
SEQ_LENGTH = 20
STATEFUL = False
scaler = None # tmp, for fit_transform
# id,usage,date,com_date,week,month,year
# com_date,date,id,month,usage,week,year
def get_data(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print(path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['error'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['super', 'com_date', 'error', 'numbers']].copy()
# print(df_empty)
df_super = df_empty.ix[:, [0]]
df_com_date = df_empty.ix[:, [1]]
df_error = df_empty.ix[:, [2]]
df_numbers = df_empty.ix[:, [3]]
X_train_ = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train_ = df_error.as_matrix()
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_super = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [1]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_x.fit_transform(df_empty.ix[:, [3]])
df_numbers = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [2]])
df_error = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_super, df_com_date, df_numbers, df_week, df_month))
Y_train = df_error.as_matrix()
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
y_arr = Y_train.T.tolist()
# print(y_arr)
try:
y_arr = ss_y.inverse_transform(y_arr)
#draw_error_line(y_arr[0], df)
#draw_error_bar(y_arr[0])
except Exception as e:
print(e)
if not issplit:
print('Xshape:' + str(X_train.shape))
print('Yshape:' + str(Y_train.shape))
X_train, X_test, Y_train, Y_test = train_test_split(X_train_, Y_train_, test_size=0.1, shuffle=False)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
return X_train, Y_train, X_test, Y_test, X_val, Y_val
else:
return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
import datetime
def get_data_single_user(path_to_dataset='df_dh.csv', sequence_length=20, stateful=False, issplit=True):
fold_index = 1
###
dtypes = {'sub': 'float', 'super': 'float', 'error': 'float', 'com_date': 'int', 'week': 'str', 'month': 'str',
'year': 'str', 'numbers': 'int', 'log': 'float', 'id': 'str', 'usage': 'float'}
parse_dates = ['date']
print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$' + path_to_dataset)
df = pd.read_csv(DATAPATH + path_to_dataset, header=0, dtype=dtypes, parse_dates=parse_dates, encoding="utf-8")
# print(path_to_dataset)
print(df.columns)
df = df[df['usage'] >= 0]
# df_test = pd.read_csv(DATAPATH+"test"+str(fold_index)+".csv", header = 0, dtype=dtypes, parse_dates=parse_dates,encoding="utf-8")
def helper(x):
split = list(map(int, x.strip('[').strip(']').split(',')))
d = {}
for counter, value in enumerate(split):
k = str(len(split)) + "-" + str(counter)
d[k] = value
return d
# df_train_temp = df_train['week'].apply(helper).apply(pd.Series)
df_week = df['week'].apply(helper).apply(pd.Series).as_matrix() # 7
df_month = df['month'].apply(helper).apply(pd.Series).as_matrix() # 12
df_year = df['year'].apply(helper).apply(pd.Series).as_matrix() # 3
df_empty = df[['com_date', 'usage']].copy()
# print(df_empty)
df_com_date = df_empty.ix[:, [0]]
df_usage = df_empty.ix[:, [1]]
ss_x = preprocessing.MaxAbsScaler()
ss_y = preprocessing.MaxAbsScaler()
global scaler
scaler = ss_y
# ss_x = preprocessing.StandardScaler()
array_new = ss_x.fit_transform(df_empty.ix[:, [0]])
df_com_date = pd.DataFrame(array_new)
array_new = ss_y.fit_transform(df_empty.ix[:, [1]])
df_usage = pd.DataFrame(array_new)
df_week = ss_x.fit_transform(df_week)
df_week = pd.DataFrame(df_week)
df_month = ss_x.fit_transform(df_month)
df_month = pd.DataFrame(df_month)
X_train = np.column_stack((df_week, df_month))
Y_train = df_usage.as_matrix()
print(X_train)
print(Y_train.shape)
y_arr = Y_train.T.tolist()
# print(y_arr)
print(df)
y_arr = ss_y.inverse_transform(y_arr)
draw_error_line(y_arr[0], df)
draw_error_bar(y_arr[0])
# try:
#
# except Exception as e:
# print(e)
if not issplit:
return X_train, Y_train
else:
return split_CV(X_train, Y_train, sequence_length=sequence_length, stateful=False)
def inverse_xy_transform(scaler, *para):
temp = []
for i in para:
print(i.reshape(-1, 1))
temp.append(scaler.inverse_transform(i.reshape(-1, 1)))
return temp
def split_CV(X_train, Y_train, sequence_length=20, stateful=False):
"""return ndarray
"""
print(X_train)
print(Y_train.shape[0])
result_x = []
result_y = []
for index in range(len(Y_train) - sequence_length):
result_x.append(X_train[index: index + sequence_length])
# result_y.append(Y_train[index: index + sequence_length])
result_y.append(Y_train[index + sequence_length])
X_train = np.array(result_x)
Y_train = np.array(result_y)
print(X_train.shape) # (705, 20, 24)
print(Y_train.shape) # (705, 1)
print('##################################################################')
if stateful == True:
# X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1,shuffle=False)
cp_X_train = X_train.copy()
cp_Y_train = Y_train.copy()
X_train = cp_X_train[:640, ...]
X_test = cp_X_train[640:, ...]
Y_train = cp_Y_train[:640, ...]
Y_test = cp_Y_train[640:, ...]
print(X_test.shape[0]) #
print(Y_test.shape[0]) #
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
print('##################################################################')
if stateful == False:
X_train, X_test, Y_train, Y_test = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
X_train, X_val, Y_train, Y_val = train_test_split(X_train, Y_train, test_size=0.1, shuffle=False)
# print(X_train.shape)#(705, 20, 24)
# print(Y_train.shape)#(705, 1)
# train_x_disorder = X_train.reshape((X_train.shape[0],X_train.shape[1] , feature_num))
# test_x_disorder = X_test.reshape((X_test.shape[0],X_test.shape[1], feature_num ))
# X_val = X_val.reshape((X_val.shape[0], X_val.shape[1] , feature_num))
# print(train_x_disorder.dtype)
train_y_disorder = Y_train.reshape(-1, 1)
test_y_disorder = Y_test.reshape(-1, 1)
Y_val = Y_val.reshape(-1, 1)
print(X_train.shape[0]) # (705, 20, 24)
print(Y_train.shape[0]) # (705, 1)
print('@' * 40)
# print(X_test)
print(train_y_disorder.shape)
print('@' * 40)
return [X_train, train_y_disorder, X_test, test_y_disorder, X_val, Y_val] # ndarray
def LSTM2(X_train):
model = Sequential()
# layers = [1, 50, 100, 1]
layers = [1, 30, 30, 1]
if STATEFUL == False:
model.add(LSTM(
layers[1],
input_shape=(X_train.shape[1], X_train.shape[2]),
stateful=STATEFUL,
return_sequences=True,
kernel_initializer='he_normal'
# , kernel_regularizer=l2(0.01)
))
else:
model.add(LSTM(
layers[1],
# input_shape=(X_train.shape[1], X_train.shape[2]),
batch_input_shape=(batch_size, X_train.shape[1], X_train.shape[2]),
stateful=STATEFUL,
return_sequences=True,
kernel_initializer='he_normal'
# , kernel_regularizer=l2(0.01)
))
# model.add(Dropout(0.2))
model.add(LSTM(
layers[2],
stateful=STATEFUL,
return_sequences=False,
kernel_initializer='he_normal'
# ,kernel_regularizer=l2(0.01)
))
model.add(Dropout(0.2))
# model.add(Flatten())
model.add(Dense(
layers[3]
, kernel_initializer='he_normal'
, kernel_regularizer=l2(0.01)
, activity_regularizer=l1(0.01)
))
model.add(BatchNormalization())
model.add(Activation("linear"))
start = time.time()
sgd = SGD(lr=1e-3, decay=1e-8, momentum=0.9, nesterov=True)
ada = Adadelta(lr=1e-4, rho=0.95, epsilon=1e-6)
rms = RMSprop(lr=0.001, rho=0.9, epsilon=1e-6, decay=1e-8)
adam = Adam(lr=1e-3)
# model.compile(loss="mse", optimizer=sgd)
# try:
# model.load_weights("./lstm.h5")
# except Exception as ke:
# print(str(ke))
model.compile(loss="mse", optimizer=adam)
print("Compilation Time : ", time.time() - start)
return model
def draw_error_bar(y_array):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(y_array)))
plt.bar(x, y_array, label='error')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('error bar')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + 'bar_error.png', dpi=300)
def draw_error_line(y_array, df):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(y_array)))
plt.plot(x, y_array, label='error')
x = list(range(len(df['error'])))
plt.plot(x, df['error'], label='error')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('error plot')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + 'line_error.png', dpi=300)
def draw_scatter(predicted, y_test, X_test, x_train, y_train, data_file):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(predicted)))
total_width, n = 0.8, 2
width = total_width / n
plt.bar(x, y_test.T[0], width=width, label='truth', fc='y')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, predicted, width=width, label='predict', fc='r')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('lstm')
# plt.show()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
fig = plt.figure()
plt.scatter(y_test.T[0], predicted)
# plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('truth')
plt.ylabel('predict')
# plt.show()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
dpi=300)
def draw_line(predicted, y_test, X_test, x_train, y_train, data_file):
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
x = list(range(len(predicted)))
total_width, n = 0.8, 2
width = total_width / n
plt.bar(x, y_test.T[0], width=width, label='True', fc='y')
for i in range(len(x)):
x[i] = x[i] + width
plt.bar(x, predicted, width=width, label='Predicted', fc='r')
# plt.legend(handles=[line1, line2,line3])
plt.legend()
plt.title('lstm')
# plt.show()
axes.grid()
axes = fig.add_subplot(1, 1, 1)
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'bar_lstm.png', dpi=300)
fig = plt.figure()
plt.scatter(y_test.T[0], predicted)
# plt.plot(y_test.T[0], predicted, linewidth =0.3, color='red')
plt.xlim(0, 1)
plt.ylim(0, 1)
plt.xlabel('True')
plt.ylabel('Predicted')
# plt.show()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + '_scatter_lstm.png',
dpi=300)
fig = plt.figure()
axes = fig.add_subplot(1, 1, 1)
plt.plot(x, y_test.T[0], label='True')
for i in range(len(x)):
x[i] = x[i] + width
plt.plot(x, predicted, label='Predicted')
plt.legend()
axes.grid()
fig.tight_layout()
fig.savefig(RESULT_PATH + str(batch_size) + data_file + str(prefix) + 'line_lstm.png', dpi=300)
def stat_metrics(X_test, y_test, predicted):
predicted = np.reshape(predicted, y_test.shape[0])
train_error = np.abs(y_test - predicted)
mean_error = np.mean(train_error)
min_error = np.min(train_error)
max_error = np.max(train_error)
std_error = np.std(train_error)
print(predicted)
print(y_test.T[0])
print(np.mean(X_test))
print("#" * 20)
print(mean_error)
print(std_error)
print(max_error)
print(min_error)
print("#" * 20)
print(X_test[:, 1])
# 0.165861394194
# ####################
# 0.238853857898
# 0.177678269353
# 0.915951014937
# 5.2530646691e-0
pass
def run_regressor(model=LSTM2, sequence_length = SEQ_LENGTH, data=None, data_file='df_dh.csv', isload_model=True, testonly=False):
epochs = 1000
path_to_dataset = data_file
global mses
if data is None:
X_train, y_train, X_test, y_test, X_val, Y_val = get_data(sequence_length=sequence_length, stateful=STATEFUL,
path_to_dataset=data_file)
else:
X_train, y_train, X_test, y_test, X_val, Y_val = data
if STATEFUL:
X_test = X_test[:int(X_test.shape[0] / batch_size) * batch_size]
y_test = y_test[:int(y_test.shape[0] / batch_size) * batch_size]
estimator = KerasRegressor(build_fn=lambda x=X_train: model(x))
# if testonly == True:
# # predicted = model.predict(X_test, verbose=1,batch_size=batch_size)
# prediction = estimator.predict(X_test)
# stat_metrics(X_test, y_test, prediction)
# draw_scatter(predicted_arr[0], y_test, X_test, X_train, y_train, data_file)
# return
early_stopping = EarlyStopping(monitor='val_loss', verbose=1, patience=40)
checkpoint = ModelCheckpoint("./lstm.h5", monitor='val_loss', verbose=1, save_best_only=True,
save_weights_only=True)
################
hist = estimator.fit(X_train, y_train, validation_data=(X_val, Y_val), callbacks=[checkpoint, early_stopping],
epochs=epochs, batch_size=batch_size, verbose=1)
# prediction = estimator.predict(X_test)
score = mean_squared_error(y_test, estimator.predict(X_test))
estimator_score = estimator.score(X_test, y_test)
print(score)
mses.append(score)
prediction = estimator.predict(X_test)
print(prediction)
print(X_test)
print("##############################################")
# predicted_arr = prediction.T.tolist()
# print(predicted_arr)
global scaler
prediction_, y_test_, y_train_ = inverse_xy_transform(scaler, prediction, y_test, y_train)
predicted_df = pd.DataFrame(prediction_)
y_test_df = | pd.DataFrame(y_test_) | pandas.DataFrame |
# Copyright 2019 Verily Life Sciences LLC
#
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
import datetime
import unittest
from re import escape
from typing import Any, List, Optional, Sequence, Tuple, Union, cast # noqa: F401
import numpy as np
import pandas as pd
import six
from ddt import data, ddt, unpack
from purplequery.binary_expression import BinaryExpression
from purplequery.bq_abstract_syntax_tree import (EMPTY_CONTEXT, EMPTY_NODE, # noqa: F401
AbstractSyntaxTreeNode, EvaluatableNode,
EvaluationContext, Field, GroupedBy, _EmptyNode)
from purplequery.bq_types import (BQArray, BQScalarType, BQStructType, BQType, # noqa: F401
PythonType, TypedDataFrame, TypedSeries)
from purplequery.dataframe_node import QueryExpression, Select, TableReference
from purplequery.evaluatable_node import LiteralType # noqa: F401
from purplequery.evaluatable_node import (Case, Cast, Exists, Extract, FunctionCall, If, InCheck,
Not, NullCheck, Selector, UnaryNegation, Value)
from purplequery.grammar import select as select_rule
from purplequery.grammar import query_expression
from purplequery.query_helper import apply_rule
from purplequery.storage import DatasetTableContext
from purplequery.tokenizer import tokenize
@ddt
class EvaluatableNodeTest(unittest.TestCase):
def setUp(self):
# type: () -> None
self.small_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1], [2]], columns=['a']),
types=[BQScalarType.INTEGER]
)
}
}
})
self.large_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, 3], [1, 4, 3]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
def test_selector(self):
# type: () -> None
selector = Selector(Field(('a',)), 'field_alias')
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(list(typed_series.dataframe), ['field_alias'])
self.assertEqual(typed_series.types, [BQScalarType.INTEGER])
def test_selector_group_by_success(self):
# type: () -> None
selector = Selector(Field(('c',)), EMPTY_NODE)
selector.position = 1
context = EvaluationContext(self.large_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
context.exclude_aggregation = True
updated_selector, = context.do_group_by([selector], [Field(('my_table', 'c'))])
typed_series = updated_selector.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [3])
@data((5, BQScalarType.INTEGER),
(1.23, BQScalarType.FLOAT),
("something", BQScalarType.STRING),
(True, BQScalarType.BOOLEAN),
(None, None))
@unpack
def test_value_repr(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check Value's string representation'''
node = Value(value, type_)
representation = 'Value(type_={}, value={})'.format(type_.__repr__(), value.__repr__())
self.assertEqual(node.__repr__(), representation)
@data((5, None),
(None, BQScalarType.INTEGER))
@unpack
def test_invalid_value(self, value, type_):
# type: (Optional[LiteralType], Optional[BQScalarType]) -> None
'''Check that None is only allowed as both value and type_ or neither.'''
with self.assertRaises(ValueError):
Value(value, type_)
def test_value_eval(self):
# type: () -> None
# A constant is repeated for each row in the context table.
value = Value(12345, BQScalarType.INTEGER)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')), 'foo')
typed_series = value.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [12345, 12345])
def test_field(self):
# type: () -> None
field = Field(('a',))
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = field.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [1, 2])
self.assertEqual(typed_series.series.name, 'a')
@data(
dict(function_name='sum', args=[Field(('a',))], expected_result=[3],
is_aggregating=True),
dict(function_name='max', args=[Field(('a',))], expected_result=[2],
is_aggregating=True),
dict(function_name='min', args=[Field(('a',))], expected_result=[1],
is_aggregating=True),
dict(function_name='concat',
args=[Value('foo', BQScalarType.STRING), Value('bar', BQScalarType.STRING)],
expected_result=['foobar'] * 2), # two copies to match length of context table.
dict(function_name='mod',
args=[Field(('a',)), Value(2, BQScalarType.INTEGER)],
expected_result=[1, 0]),
dict(function_name='mod',
args=[Value(1.0, BQScalarType.FLOAT), Value(2, BQScalarType.INTEGER)],
expected_result=[1.0, 1.0]),
dict(function_name='timestamp',
args=[Value("2019-04-22", BQScalarType.STRING)],
expected_result=[datetime.datetime(2019, 4, 22)] * 2), # two copies to match table len
)
@unpack
def test_functions(self, function_name, args, expected_result, is_aggregating=False):
# type: (str, List[EvaluatableNode], List[PythonType], bool) -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
if is_aggregating:
context.do_group_by((), [])
result = FunctionCall.create(function_name, args, EMPTY_NODE).evaluate(context)
assert isinstance(result, TypedSeries)
self.assertEqual(
[result.type_.convert(elt) for elt in result.series],
expected_result)
def test_current_timestamp(self):
# type: () -> None
node, leftover = apply_rule(query_expression, tokenize(
'select current_timestamp(), a from unnest([struct(1 as a), struct(2), struct(3)])'))
assert isinstance(node, QueryExpression)
self.assertFalse(leftover)
result, _ = node.get_dataframe(DatasetTableContext({}))
table = cast(List[List[datetime.datetime]], result.to_list_of_lists())
self.assertEqual(len(table), 3)
# CURRENT_TIMESTAMP() returns a very recent timestamp
self.assertLess((datetime.datetime.now() - table[0][0]).seconds, 2)
# All rows have the same timestamp value.
self.assertEqual(table[0][0], table[1][0])
self.assertEqual(table[0][0], table[2][0])
@data(
# These expressions are ones whose EvaluatableNode subclass constructs a
# new pandas Series rather than computing on existing ones. See below:
# this runs the risk of constructing it with an incorrect index.
dict(query='select 10, c', expected_result=[[10, 6], [10, 9]]),
dict(query='select [a, b], c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select (a, b), c', expected_result=[[(4, 5), 6], [(7, 8), 9]]),
dict(query='select exists(select 1), c', expected_result=[[True, 6], [True, 9]]),
dict(query='select a in (1, 4), c', expected_result=[[True, 6], [False, 9]]),
dict(query='select row_number() over (), c', expected_result=[[1, 6], [2, 9]]),
dict(query='select current_timestamp() > timestamp("2019-01-01"), c',
expected_result=[[True, 6], [True, 9]]),
)
@unpack
def test_constructed_column_has_correct_index(self, query, expected_result):
# type: (str, List[List[int]]) -> None
'''Checks that manually constructed columns have the same index as the data.
A manually constructed column will usually have an index 0, 1, 2, ...
(e.g. pd.Series(['a', 'b', 'c']) has index 0, 1, 2).
The data may not; filtering, sorting or other changes might result in an index of
different numbers. If one column's index doesn't match the index of other columns,
it can't be compared or joined with them properly.
'''
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, -1], [4, 5, 6], [7, 8, 9]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
# Skip the first row of the table, so that the index of the table that
# the test queries operate on is [1, 2]; this makes sure that the index is
# different from the default index you would get for a two-row column,
# which would be [0, 1], to test that expressions are not incorrectly
# using that default index.
node, leftover = select_rule(tokenize(query + ' from (select * from my_table where c > 0)'))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(list(result.dataframe.index), [1, 2])
def test_bad_function(self):
# type: () -> None
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
with self.assertRaisesRegexp(NotImplementedError, 'NOT_A_FUNCTION not implemented'):
FunctionCall.create('not_a_function', [], EMPTY_NODE).evaluate(context)
@data(
# Explore each aggregate function, along with a non-aggregate function to make sure we
# can compute both at once.
dict(selectors='sum(a), b+10', expected_result=[[6, 11], [5, 12]]),
dict(selectors='sum(a), 20+10', expected_result=[[6, 30], [5, 30]]),
dict(selectors='sum(a+1), b+10', expected_result=[[8, 11], [6, 12]]),
dict(selectors='max(a), b+10', expected_result=[[4, 11], [5, 12]]),
dict(selectors='min(a), b+10', expected_result=[[2, 11], [5, 12]]),
dict(selectors='count(a), b+10', expected_result=[[2, 11], [1, 12]]),
dict(selectors='count(*), b+10', expected_result=[[2, 11], [2, 12]]),
dict(selectors='array_agg(a), []', expected_result=[[(2, 4), ()], [(5, None), ()]]),
dict(selectors='array_agg(a), [b]', expected_result=[[(2, 4), (1,)], [(5, None), (2,)]]),
dict(selectors='array_agg(a), [7, 8]', expected_result=[[(2, 4), (7, 8)],
[(5, None), (7, 8)]]),
dict(selectors='array_agg(a), b+10', expected_result=[[(2, 4), 11], [(5, None), 12]]),
)
@unpack
def test_aggregate_functions_in_group_by(self, selectors, expected_result):
# type: (str, List[List[int]]) -> None
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[2, 1], [4, 1], [5, 2], [np.nan, 2]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table group by b'.format(selectors))
node, leftover = select_rule(tokens)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
dict(query='select sum(a + 1) + 2, count(*) + 3, 4 from my_table',
expected_result=[[11, 6, 4]]),
)
@unpack
def test_aggregate_functions_in_expressions(self, query, expected_result):
# type: (str, List[List[int]]) -> None
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[1], [2], [3]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query))
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
self.assertEqual(result.to_list_of_lists(), expected_result)
@data(
# Test all variations of creating a struct (typed, typeless, tuple),
# with and without named fields, with one field, and then with two
# fields.
dict(query='SELECT STRUCT<INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT<a INTEGER>(1)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1 AS a)',
expected_result=(1,),
expected_type=BQStructType(['a'], [BQScalarType.INTEGER])),
dict(query='SELECT STRUCT(1)',
expected_result=(1,),
expected_type=BQStructType([None], [BQScalarType.INTEGER])),
# Note: no test of single-element tuple syntax, as that would just be a
# parenthesized expression, there's no analogue to Python's trailing comma.
dict(query='SELECT STRUCT<INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT<a INTEGER, b STRING>(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a" as b)',
expected_result=(1, 'a'),
expected_type=BQStructType([None, 'b'], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1 AS a, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType(['a', None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT STRUCT(1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
dict(query='SELECT (1, "a")',
expected_result=(1, 'a'),
expected_type=BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])),
)
@unpack
def test_struct_constant_expressions(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQStructType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
# Test all three struct syntaxes, selecting a column as one field, a
# constant as the other.
dict(query='SELECT (a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType([None, None], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT(a as x, "a" as y) FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
dict(query='SELECT STRUCT<x INTEGER, y STRING>(a, "a") FROM my_table',
expected_result=[[(1, 'a')], [(2, 'a')]],
expected_types=[
BQStructType(['x', 'y'], [BQScalarType.INTEGER, BQScalarType.STRING])]),
)
@unpack
def test_struct_field_and_constant(self, query, expected_result, expected_types):
# type: (str, List[List[Tuple[Optional[int], ...]]], Sequence[BQStructType]) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(self.small_table_context)
self.assertEqual(result.to_list_of_lists(), expected_result)
self.assertEqual(result.types, expected_types)
@data(
# Test combination types of arrays and structs.
dict(query='SELECT ([1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType([None, None], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT STRUCT<x ARRAY<INTEGER>, y STRING>(ARRAY<INTEGER>[1], "a")',
expected_result=((1,), 'a'),
expected_type=BQStructType(['x', 'y'], [BQArray(BQScalarType.INTEGER),
BQScalarType.STRING])),
dict(query='SELECT [(1, "a")]',
expected_result=((1, 'a'), ),
expected_type=BQArray(BQStructType([None, None], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b STRING>(1, "a"), (2, "b")]',
expected_result=((1, 'a'), (2, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQScalarType.STRING]))),
# Test that an array of structs merges and coerces the types of the
# structs.
dict(query='SELECT [STRUCT<a FLOAT, STRING>(1.0, "a"), STRUCT<INTEGER, b STRING>(2, "b")]',
expected_result=((1.0, 'a'), (2.0, 'b')),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.FLOAT,
BQScalarType.STRING]))),
dict(query='SELECT [STRUCT<a INTEGER, b ARRAY<STRING> >(1, ["a"]), (2, ["b", "c"])]',
expected_result=((1, ('a',)), (2, ('b', 'c'))),
expected_type=BQArray(BQStructType(['a', 'b'], [BQScalarType.INTEGER,
BQArray(BQScalarType.STRING)]))),
)
@unpack
def test_complex_types(self, query, expected_result, expected_type):
# type: (str, Tuple[Optional[int], ...], BQType) -> None
table_context = DatasetTableContext({})
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
self.assertEqual(result.types, [expected_type])
@data(
dict(query='SELECT ARRAY_AGG(a)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(a RESPECT NULLS)',
expected_result=(1, 1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(DISTINCT a RESPECT NULLS)',
expected_result=(1, 2, None)),
dict(query='SELECT ARRAY_AGG(a IGNORE NULLS)',
expected_result=(1, 1, 2)),
dict(query='SELECT ARRAY_AGG(DISTINCT a IGNORE NULLS)',
expected_result=(1, 2)),
)
@unpack
def test_array_agg_arguments(self, query, expected_result):
# type: (str, Tuple[Optional[int], ...]) -> None
table_context = DatasetTableContext(
{'p': {'d': {'t':
TypedDataFrame(pd.DataFrame([[1], [1], [2], [None]], columns=['a']),
types=[BQScalarType.INTEGER])}}})
node, leftover = select_rule(tokenize(query + ' FROM p.d.t'))
self.assertFalse(leftover)
assert isinstance(node, Select)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [[expected_result]])
@data(
dict(query='SELECT [1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT STRUCT<INT64>(3.7)',
error='Struct field 1 has type .*FLOAT which does not coerce to .*INTEGER'),
dict(query='SELECT ARRAY<INT64>[3.7]',
error='Array specifies type .*INTEGER, incompatible with values of type .*FLOAT'),
dict(query='SELECT ARRAY<INT64>[1,2,"a"]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT ARRAY<string>[1,2]',
error='Cannot implicitly coerce the given types'),
dict(query='SELECT [[1]]',
error='Cannot create arrays of arrays'),
dict(query='SELECT [(1, 2), (3, 4, 5)]',
error='Cannot merge .* number of fields varies'),
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as x, 4 as b)]',
error='Cannot merge Structs; field names .* do not match'),
# same types in different orders can't merge.
dict(query='SELECT [(1, "a"), ("b", 2)]',
error='Cannot implicitly coerce the given types'),
# same names in different orders can't merge
dict(query='SELECT [STRUCT(1 as a, 2 as b), STRUCT(3 as b, 4 as a)]',
error='Cannot merge Structs; field names .* do not match'),
)
@unpack
def test_complex_type_errors(self, query, error):
# type: (str, str) -> None
node, leftover = select_rule(tokenize(query))
self.assertFalse(leftover)
assert isinstance(node, Select)
with self.assertRaisesRegexp(ValueError, error):
node.get_dataframe(self.small_table_context)
@data(
# Row number over whole dataset; order is not guaranteed
dict(selectors='row_number() over ()', expected_result=[[1], [2], [3], [4]]),
dict(selectors='row_number() over (order by a), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a asc), a',
expected_result=[[1, 10], [2, 20], [3, 30], [4, 30]]),
dict(selectors='row_number() over (order by a desc), a',
expected_result=[[4, 10], [3, 20], [2, 30], [1, 30]]),
dict(selectors='row_number() over (partition by b order by a), a',
expected_result=[[1, 10], [2, 20], [1, 30], [2, 30]]),
dict(selectors='sum(a) over (), a',
expected_result=[[90, 10], [90, 20], [90, 30], [90, 30]]),
dict(selectors='sum(a) over (partition by b), a',
expected_result=[[30, 10], [30, 20], [60, 30], [60, 30]]),
dict(selectors='count(*) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(a) over (), a',
expected_result=[[4, 10], [4, 20], [4, 30], [4, 30]]),
dict(selectors='count(*) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='count(a) over (partition by b), a',
expected_result=[[2, 10], [2, 20], [2, 30], [2, 30]]),
dict(selectors='sum(count(*)) over ()',
expected_result=[[4]]),
)
@unpack
def test_analytic_function(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[20, 200], [10, 200], [30, 300], [30, 300]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table'.format(selectors))
node, leftover = select_rule(tokens)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
# Note: BQ docs say if ORDER BY clause (for the select as a whole) is not present, order of
# results is undefined, so we do not assert on the order.
six.assertCountEqual(self, result.to_list_of_lists(), expected_result)
@data(
dict(selectors='sum(count(*)) over (), count(*)',
expected_result=[[5, 2], [5, 3]]),
)
@unpack
def test_analytic_function_with_group_by(self, selectors, expected_result):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([[20, 2], [10, 2], [30, 3], [31, 3], [32, 3]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER])}}})
tokens = tokenize('select {} from my_table group by b'.format(selectors))
node, leftover = select_rule(tokens)
result, unused_table_name = node.get_dataframe(table_context)
self.assertFalse(leftover)
# Note: BQ docs say if ORDER BY clause (for the select as a whole) is not present, order of
# results is undefined, so we do not assert on the order.
six.assertCountEqual(self, result.to_list_of_lists(), expected_result)
def test_non_aggregate_function_in_group_by(self):
table_context = DatasetTableContext(
{'my_project': {'my_dataset': {'my_table': TypedDataFrame(
pd.DataFrame([['one', '1'], ['two', '1'], ['three', '2'], ['four', '2']],
columns=['a', 'b']),
types=[BQScalarType.STRING, BQScalarType.INTEGER])}}})
tokens = tokenize('select max(concat(b, "hi")) from my_table group by b')
node, leftover = select_rule(tokens)
self.assertFalse(leftover)
result, unused_table_name = node.get_dataframe(table_context)
self.assertEqual(result.to_list_of_lists(), [['1hi'], ['2hi']])
@data(
dict(count='COUNT(*)', expected_result=[[2]]),
dict(count='COUNT(c)', expected_result=[[2]]),
dict(count='COUNT(DISTINCT c)', expected_result=[[1]]),
dict(count='COUNT(b)', expected_result=[[2]]),
dict(count='COUNT(DISTINCT b)', expected_result=[[2]]),
dict(count='COUNT(a)', expected_result=[[1]]),
)
@unpack
def test_count(self, count, expected_result):
# type: (str, List[List[int]]) -> None
count_table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, 2, 3], [None, 4, 3]], columns=['a', 'b', 'c']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
select, leftover = select_rule(tokenize('SELECT {} FROM my_table'.format(count)))
self.assertFalse(leftover)
assert isinstance(select, Select)
dataframe, unused_table_name = select.get_dataframe(count_table_context)
self.assertEqual(dataframe.to_list_of_lists(), expected_result)
@data(('IS_NULL', [True, False]), ('IS_NOT_NULL', [False, True]))
@unpack
def test_null_check(self, direction, result):
# type: (str, List[bool]) -> None
table_context = DatasetTableContext({
'my_project': {
'my_dataset': {
'my_table': TypedDataFrame(
pd.DataFrame([[1, None], [2, 3]], columns=['a', 'b']),
types=[BQScalarType.INTEGER, BQScalarType.INTEGER]
)
}
}
})
context = EvaluationContext(table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
expression = Field(('b',))
null_check = NullCheck(expression, direction)
typed_series = null_check.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
@data(('IN', [True, False]), ('NOT_IN', [False, True]))
@unpack
def test_in_check(self, direction, result):
# type: (str, List[bool]) -> None
expression = Field(('a',))
elements = (Value(1, type_=BQScalarType.INTEGER), Value(3, type_=BQScalarType.INTEGER))
in_check = InCheck(expression, direction, elements)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = in_check.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
@data(
(True, 0),
(False, 1)
)
@unpack
def test_if_empty_context(self, condition_bool, result):
# type: (bool, int) -> None
condition = Value(condition_bool, BQScalarType.BOOLEAN)
then = Value(0, BQScalarType.INTEGER)
else_ = Value(1, BQScalarType.INTEGER)
# IF [condition] THEN 0 ELSE 1
if_expression = If(condition, then, else_)
typed_series = if_expression.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [result])
def test_if(self):
condition = BinaryExpression(Field(('a',)), '>', Value(1, BQScalarType.INTEGER))
then = Value('yes', BQScalarType.STRING)
else_ = Value('no', BQScalarType.STRING)
# IF a > 1 THEN "yes" ELSE "no"
if_expression = If(condition, then, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = if_expression.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), ['no', 'yes'])
def test_if_different_types(self):
condition = Value(True, BQScalarType.BOOLEAN)
then = Value('yes', BQScalarType.STRING)
else_ = Value(1, BQScalarType.INTEGER)
if_expression = If(condition, then, else_)
error = (r"Cannot implicitly coerce the given types: "
r"\(BQScalarType.STRING, BQScalarType.INTEGER\)")
with self.assertRaisesRegexp(ValueError, error):
if_expression.evaluate(EMPTY_CONTEXT)
def test_if_error(self):
condition = Value(5, BQScalarType.INTEGER)
then = Value(0, BQScalarType.INTEGER)
else_ = Value(1, BQScalarType.INTEGER)
if_expression = If(condition, then, else_)
error = escape("IF condition isn't boolean! Found: {}".format(
str(condition.evaluate(EMPTY_CONTEXT))))
with self.assertRaisesRegexp(ValueError, error):
if_expression.evaluate(EMPTY_CONTEXT)
def test_not(self):
expression = Value(True, BQScalarType.BOOLEAN)
not_expression = Not(expression)
typed_series = not_expression.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [False])
def test_not_type_error(self):
expression = Value(5, BQScalarType.INTEGER)
not_expression = Not(expression)
with self.assertRaisesRegexp(ValueError, ""):
not_expression.evaluate(EMPTY_CONTEXT)
@data(
(1, BQScalarType.INTEGER, -1),
(1.0, BQScalarType.FLOAT, -1.0),
)
@unpack
def test_unary_negation(self, initial_value, value_type, result_value):
# type: (Any, BQScalarType, Any) -> None
expression = Value(initial_value, value_type)
negation = UnaryNegation(expression)
typed_series = negation.evaluate(EMPTY_CONTEXT)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), [result_value])
@data(
("abc", BQScalarType.STRING),
(True, BQScalarType.BOOLEAN),
)
@unpack
def test_unary_negation_error(self, value, value_type):
# type: (Any, BQScalarType) -> None
expression = Value(value, value_type)
negation = UnaryNegation(expression)
error = ("UnaryNegation expression supports only integers and floats, got: {}"
.format(value_type))
with self.assertRaisesRegexp(TypeError, error):
negation.evaluate(EMPTY_CONTEXT)
@data(
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING)),
(Value(2, BQScalarType.INTEGER), Value("two", BQScalarType.STRING))],
else_=Value("other", BQScalarType.STRING),
result=["one", "two"]
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=Value("other", BQScalarType.STRING),
result=["one", "other"]
),
dict(
comparand=EMPTY_NODE,
whens=[(Value(True, BQScalarType.BOOLEAN), Value("yes", BQScalarType.STRING)),
(Value(False, BQScalarType.BOOLEAN), Value("no", BQScalarType.STRING))],
else_=EMPTY_NODE,
result=["yes", "yes"]
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=EMPTY_NODE,
result=["one", None]
),
)
@unpack
def test_case_with_comparand(
self, comparand, # type: Union[_EmptyNode, EvaluatableNode]
whens, # type: List[Tuple[AbstractSyntaxTreeNode, EvaluatableNode]]
else_, # type: EvaluatableNode
result # type: List[str]
):
# type: (...) -> None
case = Case(comparand, whens, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
typed_series = case.evaluate(context)
assert isinstance(typed_series, TypedSeries)
self.assertEqual(list(typed_series.series), result)
def test_case_no_whens(self):
comparand = EMPTY_NODE
whens = []
else_ = EMPTY_NODE
error = "Must provide at least one WHEN for a CASE"
with self.assertRaisesRegexp(ValueError, error):
Case(comparand, whens, else_)
@data(
dict(
comparand=EMPTY_NODE,
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=EMPTY_NODE,
error="CASE condition isn't boolean! Found: {!r}".format(
TypedSeries(pd.Series([1, 1]), BQScalarType.INTEGER))
),
dict(
comparand=Field(('a',)),
whens=[(Value(1, BQScalarType.INTEGER), Value("one", BQScalarType.STRING))],
else_=Value(100, BQScalarType.INTEGER),
error="Cannot implicitly coerce the given types: "
"(BQScalarType.STRING, BQScalarType.INTEGER)"
),
)
@unpack
def test_case_error(self, comparand, # type: Union[_EmptyNode, EvaluatableNode]
whens, # type: List[Tuple[AbstractSyntaxTreeNode, EvaluatableNode]]
else_, # type: EvaluatableNode
error # type: str
):
# type: (...) -> None
case = Case(comparand, whens, else_)
context = EvaluationContext(self.small_table_context)
context.add_table_from_node(TableReference(('my_project', 'my_dataset', 'my_table')),
EMPTY_NODE)
with self.assertRaisesRegexp(ValueError, escape(error)):
case.evaluate(context)
@data(
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='STRING',
result='1'
),
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='FLOAT',
result=1.0
),
dict(
value=Value(1, BQScalarType.INTEGER),
cast_type='BOOLEAN',
result=True
),
dict(
value=Value(1.0, BQScalarType.FLOAT),
cast_type='STRING',
result='1.0'
),
dict(
value=Value(1.0, BQScalarType.FLOAT),
cast_type='INTEGER',
result=1
),
dict(
value=Value(True, BQScalarType.BOOLEAN),
cast_type='STRING',
result='True'
),
dict(
value=Value(True, BQScalarType.BOOLEAN),
cast_type='INTEGER',
result=1
),
dict(
value=Value('1', BQScalarType.STRING),
cast_type='INTEGER',
result=1
),
dict(
value=Value('1.0', BQScalarType.STRING),
cast_type='FLOAT',
result=1.0
),
dict(
value=Value('TRUE', BQScalarType.STRING),
cast_type='BOOLEAN',
result=True
),
dict(
value=Value('2019-12-01', BQScalarType.STRING),
cast_type='DATETIME',
result=datetime.datetime(2019, 12, 1),
),
dict(
value=Value('2019-12-01', BQScalarType.STRING),
cast_type='DATE',
result=datetime.date(2019, 12, 1),
),
dict(
value=Value('2019-12-01 01:02:03', BQScalarType.STRING),
cast_type='TIMESTAMP',
result=datetime.datetime(2019, 12, 1, 1, 2, 3),
),
dict(
value=Value(pd.Timestamp('2019-12-01'), BQScalarType.DATE),
cast_type='DATETIME',
result=datetime.datetime(2019, 12, 1),
),
dict(
value=Value( | pd.Timestamp('2019-12-01') | pandas.Timestamp |
import dash
from dash import html
from dash import dcc
import dash_bootstrap_components as dbc
from dash.dependencies import Input, Output
### plot and layout
import pandas as pd
import altair as alt
import geopandas as gpd
from si_prefix import si_format
data = | pd.read_csv("./data/processed/cleaned_salaries.csv") | pandas.read_csv |
import numpy as np
import pandas as pd
from sklearn import datasets
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn import model_selection
from sklearn import preprocessing
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from math import sqrt
import seaborn as sns
from scipy import stats
from math import sqrt,fabs
from keras import backend as K
import os
import h5py
import argparse
from keras.models import Sequential
from keras.layers import Dense, Dropout,RepeatVector
from keras.layers import LSTM, TimeDistributed, Masking
from keras.callbacks import Callback,ModelCheckpoint
from keras.models import load_model
from keras import regularizers
from keras.optimizers import SGD
import tensorflow as tf
import tensorboard
# tf.logging.set_verbosity(tf.logging.INFO)
# np.random.seed(1337)
# Prepare data
def rnn_data(data, time_steps, labels=False):
"""
    Creates sliding windows over the series: each row holds `time_steps` consecutive observations.
* example:
l = [1, 2, 3, 4, 5]
time_steps = 2
-> labels == False [[1, 2], [2, 3], [3, 4]] #Data frame for input with 2 timesteps
-> labels == True [3, 4, 5] # labels for predicting the next timestep
"""
rnn_df = []
for i in range(len(data) - time_steps):
if labels:
try:
                rnn_df.append(data.iloc[i + time_steps].to_numpy())  # .as_matrix() was removed in pandas 1.0
except AttributeError:
rnn_df.append(data.iloc[i + time_steps])
else:
            data_ = data.iloc[i: i + time_steps].to_numpy()  # .as_matrix() was removed in pandas 1.0
rnn_df.append(data_ if len(data_.shape) > 1 else [[i] for i in data_])
return np.array(rnn_df, dtype=np.float64)
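# Illustrative sketch (not part of the original pipeline): a quick check of the sliding-window
# behaviour of rnn_data. The series values and window size are arbitrary example numbers.
def _demo_rnn_data():
    series = pd.Series([1, 2, 3, 4, 5])
    windows = rnn_data(series, 2, labels=False)  # shape (3, 2, 1): three windows of two steps each
    targets = rnn_data(series, 2, labels=True)   # array([3., 4., 5.]): the value following each window
    print(windows.shape, targets)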
def change_predictive_interval(train,label,predictiveinterval):
label = label[predictiveinterval-1:]
train = train[:len(label)]
return train,label
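# Example (illustrative): with predictiveinterval=3 the labels are shifted forward by two steps
# and the inputs truncated to the same length, so each window now predicts the value three steps
# ahead of its last observation instead of the next step.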
def load_csvdata(rawdata, time_steps,mode,scalardic):
data = rawdata
if not isinstance(data, pd.DataFrame):
data = pd.DataFrame(data)
train_x = rnn_data(data['DO_mg'], time_steps, labels=False)
train_y = rnn_data(data['DO_mg'], time_steps, labels=True)
train_x_two = rnn_data(data['EC_uScm'], time_steps, labels=False)
train_y_two = rnn_data(data['EC_uScm'], time_steps, labels=True)
train_x_three = rnn_data(data['Temp_degC'], time_steps, labels=False)
train_y_three = rnn_data(data['Temp_degC'], time_steps, labels=True)
train_x_four = rnn_data(data['pH'], time_steps, labels=False)
train_y_four = rnn_data(data['pH'], time_steps, labels=True)
train_x_five = rnn_data(data['Chloraphylla_ugL'], time_steps, labels=False)
train_y_five = rnn_data(data['Chloraphylla_ugL'], time_steps, labels=True)
train_x = np.squeeze(train_x)
train_x_two = np.squeeze(train_x_two)
train_x_three = np.squeeze(train_x_three)
train_x_four = np.squeeze(train_x_four)
train_x_five = np.squeeze(train_x_five)
    # Scale the features with the provided scalers (MaxAbsScaler: divides each feature by its maximum absolute value).
if(mode=='train'):
scaler_do = scalardic['scaler_one']
scaler_ec = scalardic['scaler_two']
scaler_temp = scalardic['scaler_three']
scaler_ph = scalardic['scaler_four']
scaler_chlo = scalardic['scaler_five']
train_x = scaler_do.fit_transform(train_x)
train_x_two = scaler_ec.fit_transform(train_x_two)
train_x_three = scaler_temp.fit_transform(train_x_three)
train_x_four = scaler_ph.fit_transform(train_x_four)
train_x_five = scaler_chlo.fit_transform(train_x_five)
elif (mode=='test'):
scaler_do = scalardic['scaler_one']
scaler_ec = scalardic['scaler_two']
scaler_temp = scalardic['scaler_three']
scaler_ph = scalardic['scaler_four']
scaler_chlo = scalardic['scaler_five']
train_x = scaler_do.transform(train_x)
train_x_two = scaler_ec.transform(train_x_two)
train_x_three = scaler_temp.transform(train_x_three)
train_x_four = scaler_ph.transform(train_x_four)
train_x_five = scaler_chlo.transform(train_x_five)
all_train = np.stack((train_x, train_x_two, train_x_three,train_x_four,train_x_five), axis=-1)
all_train = all_train.reshape(-1,time_steps*5)
    return (dict(train=all_train, scalerone=scaler_do, scalertwo=scaler_ec,
                 scalerthree=scaler_temp, scalerfour=scaler_ph, scalerfive=scaler_chlo),
            dict(trainyone=train_y, trainytwo=train_y_two, trainythree=train_y_three))
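# Usage sketch (assumption: illustrative only, not part of the original training script).
# It shows how load_csvdata is expected to be wired up: a dict of per-feature scalers is
# fitted in 'train' mode and the same (now fitted) scalers are reused in 'test' mode so the
# test features are scaled consistently. Both data frames must contain the columns
# DO_mg, EC_uScm, Temp_degC, pH and Chloraphylla_ugL.
def _demo_load_csvdata(train_df, test_df, time_steps=4):
    scalardic = {name: preprocessing.MaxAbsScaler()
                 for name in ('scaler_one', 'scaler_two', 'scaler_three', 'scaler_four', 'scaler_five')}
    train_x, train_y = load_csvdata(train_df, time_steps, mode='train', scalardic=scalardic)
    test_x, test_y = load_csvdata(test_df, time_steps, mode='test', scalardic=scalardic)
    return train_x['train'], train_y['trainyone'], test_x['train'], test_y['trainyone']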
def data_together(filepath):
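    # Note: despite the 'csvs' variable name, this walks the directory tree and collects
    # files ending in '.hdf5' (e.g. saved Keras model checkpoints).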
csvs = []
dfs = []
for subdir, dirs, files in os.walk(filepath):
for file in files:
# print os.path.join(subdir, file)
filepath = subdir + os.sep + file
if filepath.endswith(".hdf5"):
csvs.append(filepath)
return csvs
def generate_data(filepath, num_sample, timestamp, start, mode, scalardic):
"""
:param filepath: data set for the model
    :param start: start row of the data set; for training, start = 0
    :param num_sample: number of samples in the training set; here 2928 samples covering 1 Oct to 30 Nov (two months)
    :param timestamp: number of timesteps per LSTM input window
    :param mode: 'train' to fit the feature scalers, 'test' to reuse already-fitted scalers
    :param scalardic: dict of per-feature scalers keyed 'scaler_one' ... 'scaler_five'
    :return: training set, train_x and train_y
"""
dataset = | pd.read_csv(filepath) | pandas.read_csv |
import numpy as np
import pandas as pd
import math
from abc import ABC, abstractmethod
from scipy.interpolate import interp1d
from pydoc import locate
from raymon.globals import (
Buildable,
Serializable,
DataException,
)
N_SAMPLES = 500
from raymon.tags import Tag, CTYPE_TAGTYPES
class Stats(Serializable, Buildable, ABC):
@abstractmethod
def sample(self, n):
raise NotImplementedError
@abstractmethod
def report_drift(self, other, threshold):
raise NotImplementedError
@abstractmethod
def report_mean_diff(self, other, threshold, use_abs=False):
raise NotImplementedError
def report_invalid_diff(self, other, threshold):
if other.samplesize == 0:
return {"invalids": "_", "alert": False, "valid": False}
invalidsdiff = other.invalids - self.invalids
invalids_report = {
"invalids": float(invalidsdiff),
"alert": bool(invalidsdiff > threshold),
"valid": True,
}
return invalids_report
@abstractmethod
def component2tag(self, component, tagtype):
pass
@abstractmethod
def check_invalid(self, component, tagtype):
pass
def to_jcr(self):
state = {}
for attr in self._attrs:
state[attr] = getattr(self, attr)
data = {"class": self.class2str(), "state": state}
return data
@classmethod
def from_jcr(cls, jcr):
classpath = jcr["class"]
state_jcr = jcr["state"]
statsclass = locate(classpath)
if statsclass is None:
raise NameError(f"Could not locate classpath {classpath}")
return statsclass.from_jcr(state_jcr)
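    # Illustrative round trip (sketch, not taken from the library's docs):
    #   jcr = some_stats.to_jcr()       # {'class': '<module path>.<Stats subclass>', 'state': {...}}
    #   restored = Stats.from_jcr(jcr)  # locates the subclass via its class path and rebuilds it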
class NumericStats(Stats):
_attrs = ["min", "max", "mean", "std", "invalids", "percentiles", "samplesize"]
def __init__(self, min=None, max=None, mean=None, std=None, invalids=None, percentiles=None, samplesize=None):
self.min = min
self.max = max
self.mean = mean
self.std = std
self.invalids = invalids
self.percentiles = percentiles
self.samplesize = samplesize
"""MIN"""
@property
def min(self):
return self._min
@min.setter
def min(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.min cannot be NaN")
self._min = value
"""MAX"""
@property
def max(self):
return self._max
@max.setter
def max(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.max cannot be NaN")
self._max = value
"""MEAN"""
@property
def mean(self):
return self._mean
@mean.setter
def mean(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.mean cannot be NaN")
self._mean = value
"""STD"""
@property
def std(self):
return self._std
@std.setter
def std(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.std cannot be NaN")
self._std = value
"""PINV"""
@property
def invalids(self):
return self._invalids
@invalids.setter
def invalids(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.invalids cannot be NaN")
self._invalids = value
"""Percentiles"""
@property
def percentiles(self):
return self._percentiles
@percentiles.setter
def percentiles(self, value):
if value is None:
self._percentiles = None
elif len(value) == 101:
self._percentiles = list(value)
else:
raise DataException("stats.percentiles must be None or a list of length 101.")
"""Size of the sample that was analyzed"""
@property
def samplesize(self):
return self._samplesize
@samplesize.setter
def samplesize(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.samplesize cannot be NaN")
self._samplesize = value
@property
def range(self):
return self.max - self.min
"""Buildable Interface"""
def build(self, data, domain=None):
"""
Parameters
----------
        data : array-like
            Numeric values to profile; NaN values are counted as invalid.
        domain : tuple, optional
            For numeric stats, the domain is the allowed range of values: (min, max).
            One or both bounds can also be None. By default None.
"""
data = np.array(data)
self.samplesize = len(data)
nan = np.isnan(data)
n_nans = len(data[nan])
data = data[~nan]
if domain and domain[0] is not None:
self.min = domain[0]
else:
self.min = float(np.min(data))
if domain and domain[1] is not None:
self.max = domain[1]
else:
self.max = float(np.max(data))
valid = (self.min <= data) & (self.max >= data)
n_invalids = len(data[~valid])
data = data[valid]
self.mean = float(data.mean())
self.std = float(data.std())
# Build cdf estimate based on percentiles
q = np.arange(start=0, stop=101, step=1)
self.percentiles = [float(a) for a in np.percentile(a=data, q=q, interpolation="higher")]
# Check the invalid
self.invalids = (n_invalids + n_nans) / self.samplesize
def is_built(self):
return all(getattr(self, attr) is not None for attr in self._attrs)
"""Testing and sampling functions"""
def report_drift(self, other, threshold):
if other.samplesize == 0:
return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
p1 = self.percentiles
p2 = other.percentiles
data_all = np.concatenate([p1, p2])
# interp = np.sort(data_all)
# If certain values cause jumps of multiple percentiles, that value should be associated with the maximum percentile
cdf1 = np.searchsorted(p1, p1, side="right")
cdf2 = np.searchsorted(p2, p2, side="right")
interpolator_1 = interp1d(x=p1, y=cdf1, fill_value=(0, 100), bounds_error=False)
interpolator_2 = interp1d(x=p2, y=cdf2, fill_value=(0, 100), bounds_error=False)
interpolated_1 = interpolator_1(data_all)
interpolated_2 = interpolator_2(data_all)
drift = min(np.max(np.abs(interpolated_1 - interpolated_2)), 100) / 100
drift_idx = int(np.argmax(np.abs(interpolated_1 - interpolated_2)))
drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
return drift_report
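    # Note on the statistic above: this is a two-sample, Kolmogorov-Smirnov-style distance
    # estimated from the 101 stored percentiles. Both empirical CDFs are evaluated on the
    # union of the percentile values and the maximum absolute difference is taken, scaled to [0, 1].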
def report_mean_diff(self, other, threshold, use_abs):
if other.samplesize == 0:
return {"mean": -1, "alert": False, "valid": False}
meandiff = other.mean - self.mean
meandiff_perc = meandiff / self.mean
if use_abs:
alert = bool(abs(meandiff_perc) > abs(threshold))
else:
alert = bool(meandiff_perc > threshold)
invalids_report = {
"mean": float(meandiff_perc),
"alert": alert,
"valid": True,
}
return invalids_report
def sample(self, n=N_SAMPLES, dtype="float"):
# Sample floats in range 0 - len(percentiles)
samples = np.random.random(n) * 100
        # We will linearly interpolate the sample between the percentiles, so get their integer floor and ceil percentile, and the relative distance from the floor (between 0 and 1)
floor_percentiles = np.floor(samples).astype("uint8")
ceil_percentiles = np.ceil(samples).astype("uint8")
percentiles_alpha = samples - np.floor(samples)
percentiles = np.array(self.percentiles)
px = percentiles[floor_percentiles] * (1 - percentiles_alpha) + percentiles[ceil_percentiles] * (
percentiles_alpha
)
if dtype == "int":
            return px.astype(int)  # np.int was removed in recent NumPy versions
else:
return px
class IntStats(NumericStats):
def component2tag(self, name, value, tagtype):
if not math.isnan(value):
return Tag(name=name, value=int(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif math.isnan(value):
return Tag(name=tagname, value="Value NaN", type=tagtype)
elif value > self.max:
return Tag(name=tagname, value="UpperBoundError", type=tagtype)
elif value < self.min:
return Tag(name=tagname, value="LowerBoundError", type=tagtype)
else:
return None
@classmethod
def from_jcr(cls, data):
return cls(**data)
class FloatStats(NumericStats):
def component2tag(self, name, value, tagtype):
if not math.isnan(value):
return Tag(name=name, value=float(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif math.isnan(value):
return Tag(name=tagname, value="Value NaN", type=tagtype)
elif value > self.max:
return Tag(name=tagname, value="UpperBoundError", type=tagtype)
elif value < self.min:
return Tag(name=tagname, value="LowerBoundError", type=tagtype)
else:
return None
@classmethod
def from_jcr(cls, data):
return cls(**data)
class CategoricStats(Stats):
_attrs = ["frequencies", "invalids", "samplesize"]
def __init__(self, frequencies=None, invalids=None, samplesize=None):
self.frequencies = frequencies
self.invalids = invalids
self.samplesize = samplesize
"""frequencies"""
@property
def frequencies(self):
return self._frequencies
@frequencies.setter
def frequencies(self, value):
if value is None:
self._frequencies = value
elif isinstance(value, dict):
for key, keyvalue in value.items():
if keyvalue < 0:
raise DataException(f"Domain count for {key} is < 0")
self._frequencies = value
else:
raise DataException(f"stats.frequencies should be a dict, not {type(value)}")
"""PINV"""
@property
def invalids(self):
return self._invalids
@invalids.setter
def invalids(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.invalids cannot be NaN")
self._invalids = value
@property
def samplesize(self):
return self._samplesize
@samplesize.setter
def samplesize(self, value):
if value is not None and math.isnan(value):
raise DataException("stats.samplesize cannot be NaN")
self._samplesize = value
@property
def range(self):
return 1
def build(self, data, domain=None):
"""[summary]
Parameters
----------
data : [type]
[description]
domain : [type], optional
            The domain of the feature. A list or set, by default None
"""
data = pd.Series(data)
self.samplesize = len(data)
nan = pd.isna(data)
n_nans = len(data[nan])
data = data[~nan]
if domain:
domain = set(domain)
valid = data.isin(domain)
n_invalids = len(data[~valid])
data = data[valid]
else:
n_invalids = 0
self.frequencies = data.value_counts(normalize=True).to_dict()
self.invalids = (n_nans + n_invalids) / self.samplesize
def is_built(self):
return all(getattr(self, attr) is not None for attr in self._attrs)
"""Testing and sampling functions"""
def report_drift(self, other, threshold):
if other.samplesize == 0:
return {"drift": -1, "drift_idx": -1, "alert": False, "valid": False}
self_f, other_f, full_domain = equalize_domains(self.frequencies, other.frequencies)
f_sorted_self = []
f_sorted_other = []
for k in full_domain:
f_sorted_self.append(self_f[k])
f_sorted_other.append(other_f[k])
f_sorted_self = np.array(f_sorted_self)
f_sorted_other = np.array(f_sorted_other)
# Chebyshev
drift = min(np.max(np.abs(f_sorted_self - f_sorted_other)), 100)
drift_idx = full_domain[np.argmax(np.abs(f_sorted_self - f_sorted_other))]
drift_report = {"drift": float(drift), "drift_idx": drift_idx, "alert": bool(drift > threshold), "valid": True}
return drift_report
def report_mean_diff(self, other, threshold, use_abs=False):
return {"mean": -1, "alert": False, "valid": False}
def sample(self, n):
domain = sorted(list(self.frequencies.keys()))
# Let's be absolutely sure the domain is always in the same order
p = [self.frequencies[k] for k in domain]
return np.random.choice(a=domain, size=n, p=p)
def sample_counts(self, domain_freq, keys, n=N_SAMPLES):
domain = sorted(list(keys))
        # Let's be absolutely sure the domain is always in the same order
p = [domain_freq.get(k, 0) for k in domain]
counts = (np.array(p) * (n - len(domain))).astype("int")
counts += 1 # make sure there are no zeros
return counts
def component2tag(self, name, value, tagtype):
if isinstance(value, str):
return Tag(name=name, value=str(value), type=tagtype)
else:
return None
def check_invalid(self, name, value, tagtype):
tagname = f"{name}-error"
if value is None:
return Tag(name=tagname, value="Value None", type=tagtype)
elif | pd.isnull(value) | pandas.isnull |
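# --- Illustrative sketch (not part of the original module) -------------------
# A minimal version of CategoricStats.report_drift above, with a simplified
# stand-in for equalize_domains: both frequency dicts are extended to the union
# of their keys (missing categories get frequency 0) and the drift is the
# Chebyshev distance, i.e. the largest absolute frequency difference. The
# helper name and the example values are hypothetical.
def _categoric_drift_sketch(freq_a, freq_b):
    import numpy as np

    full_domain = sorted(set(freq_a) | set(freq_b))
    f_a = np.array([freq_a.get(k, 0.0) for k in full_domain])
    f_b = np.array([freq_b.get(k, 0.0) for k in full_domain])
    drift_idx = full_domain[int(np.argmax(np.abs(f_a - f_b)))]
    return float(np.max(np.abs(f_a - f_b))), drift_idx

# Example: the largest frequency gap is on category "a" (0.7 vs 0.4),
# so the sketch returns roughly (0.3, "a").
# _categoric_drift_sketch({"a": 0.7, "b": 0.3}, {"a": 0.4, "b": 0.5, "c": 0.1})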
import pandas as pd
import numpy as np
from dash_website.utils.aws_loader import load_feather
from dash_website.utils.graphs import heatmap_by_sorted_dimensions
from dash_website import DOWNLOAD_CONFIG, GRAPH_SIZE
def get_data_upper_comparison(uni_or_multi, category):
return load_feather(
f"xwas/{uni_or_multi}_correlations/correlations/categories/correlations_{category}.feather"
).to_dict()
def get_graph_comparison(data_comparison_upper, data_comparison_lower):
table_correlations_upper, customdata_upper, _ = get_table_and_customdata(data_comparison_upper)
np.fill_diagonal(table_correlations_upper.values, np.nan)
table_correlations_lower, customdata_lower, _ = get_table_and_customdata(data_comparison_lower)
np.fill_diagonal(table_correlations_lower.values, np.nan)
subdimension_order = ["*", "FullBody", "Spine", "Hips", "Knees", "Scalars"]
sorted_dimensions = table_correlations_lower.sort_index(
axis=0,
level=1,
key=lambda subdimensions: list(map(lambda subdimension: subdimension_order.index(subdimension), subdimensions)),
).index
sorted_table_correlations_upper = table_correlations_upper.loc[sorted_dimensions, sorted_dimensions]
sorted_table_correlations_lower = table_correlations_lower.loc[sorted_dimensions, sorted_dimensions]
sorted_customdata_upper = customdata_upper.loc[sorted_dimensions, sorted_dimensions]
sorted_customdata_lower = customdata_lower.loc[sorted_dimensions, sorted_dimensions]
triangular_heatmap_values = np.triu(sorted_table_correlations_upper)
triangular_heatmap_values += np.tril(sorted_table_correlations_lower, k=-1)
triangular_heatmap = pd.DataFrame(
triangular_heatmap_values,
index=sorted_table_correlations_upper.index,
columns=sorted_table_correlations_upper.columns,
)
customdata_triangular_values = np.triu(sorted_customdata_upper)
customdata_triangular_values += np.tril(sorted_customdata_lower, k=-1)
customdata_triangular = pd.DataFrame(
customdata_triangular_values, index=sorted_customdata_upper.index, columns=sorted_customdata_upper.columns
)
hovertemplate_triangular = "Correlation: %{z:.3f} <br><br>Dimensions 1: %{x} <br>R²: %{customdata[0]:.3f} +- %{customdata[1]:.3f} <br>Dimensions 2: %{y}<br>R²: %{customdata[2]:.3f} +- %{customdata[3]:.3f} <br>Number variables: %{customdata[4]}<br><extra></extra>"
fig_triangular = heatmap_by_sorted_dimensions(triangular_heatmap, hovertemplate_triangular, customdata_triangular)
fig_triangular.update_layout(
yaxis={
"title": "Phenotypic correlation",
"showgrid": False,
"zeroline": False,
"title_font": {"size": 25},
"ticktext": [elem[1] for elem in triangular_heatmap.columns.values],
},
xaxis={
"title": "Genetics correlation",
"showgrid": False,
"zeroline": False,
"title_font": {"size": 25},
"ticktext": [elem[1] for elem in triangular_heatmap.index.values],
"tickangle": 90,
},
width=GRAPH_SIZE,
height=GRAPH_SIZE,
margin={"l": 0, "r": 0, "b": 0, "t": 0},
)
return fig_triangular
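# --- Illustrative sketch (not part of the original module) -------------------
# The combination step used in get_graph_comparison above, isolated: two square,
# identically-indexed correlation tables are merged into one matrix by taking
# the upper triangle of the first and the strictly lower triangle (k=-1) of the
# second. The function name is hypothetical and only meant as an illustration.
def _combine_triangles_sketch(upper_df, lower_df):
    import numpy as np
    import pandas as pd

    values = np.triu(upper_df.values) + np.tril(lower_df.values, k=-1)
    return pd.DataFrame(values, index=upper_df.index, columns=upper_df.columns)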
def get_table_and_customdata(data_comparison):
correlations_raw = pd.DataFrame(data_comparison).set_index(
["dimension_1", "subdimension_1", "r2_1", "r2_std_1", "dimension_2", "subdimension_2", "r2_2", "r2_std_2"]
)
correlations_raw.columns = pd.MultiIndex.from_tuples(
list(map(eval, correlations_raw.columns.tolist())), names=["method", "correlation_type"]
)
correlations = correlations_raw[[("union", "pearson"), ("union", "number_variables")]]
correlations.columns = ["correlation", "number_variables"]
correlations.reset_index(inplace=True)
correlations = correlations[
(correlations["dimension_1"] == "Musculoskeletal") & (correlations["dimension_2"] == "Musculoskeletal")
]
table_correlations = correlations.pivot(
index=["dimension_1", "subdimension_1"],
columns=["dimension_2", "subdimension_2"],
values="correlation",
).loc[ORDER_DIMENSIONS, ORDER_DIMENSIONS]
customdata_list = []
for customdata_item in ["r2_1", "r2_std_1", "r2_2", "r2_std_2", "number_variables"]:
customdata_list.append(
correlations.pivot(
index=["dimension_1", "subdimension_1"],
columns=["dimension_2", "subdimension_2"],
values=customdata_item,
)
.loc[ORDER_DIMENSIONS, ORDER_DIMENSIONS]
.values
)
stacked_customdata = list(map(list, np.dstack(customdata_list)))
customdata = | pd.DataFrame(np.nan, index=ORDER_DIMENSIONS, columns=ORDER_DIMENSIONS) | pandas.DataFrame |
# ###########################################################################
#
# CLOUDERA APPLIED MACHINE LEARNING PROTOTYPE (AMP)
# (C) Cloudera, Inc. 2020
# All rights reserved.
#
# Applicable Open Source License: Apache 2.0
#
# NOTE: Cloudera open source products are modular software products
# made up of hundreds of individual components, each of which was
# individually copyrighted. Each Cloudera open source product is a
# collective work under U.S. Copyright Law. Your license to use the
# collective work is as provided in your written agreement with
# Cloudera. Used apart from the collective work, this file is
# licensed for your use pursuant to the open source license
# identified above.
#
# This code is provided to you pursuant a written agreement with
# (i) Cloudera, Inc. or (ii) a third-party authorized to distribute
# this code. If you do not have a written agreement with Cloudera nor
# with an authorized and properly licensed third party, you do not
# have any rights to access nor to use this code.
#
# Absent a written agreement with Cloudera, Inc. (“Cloudera”) to the
# contrary, A) CLOUDERA PROVIDES THIS CODE TO YOU WITHOUT WARRANTIES OF ANY
# KIND; (B) CLOUDERA DISCLAIMS ANY AND ALL EXPRESS AND IMPLIED
# WARRANTIES WITH RESPECT TO THIS CODE, INCLUDING BUT NOT LIMITED TO
# IMPLIED WARRANTIES OF TITLE, NON-INFRINGEMENT, MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE; (C) CLOUDERA IS NOT LIABLE TO YOU,
# AND WILL NOT DEFEND, INDEMNIFY, NOR HOLD YOU HARMLESS FOR ANY CLAIMS
# ARISING FROM OR RELATED TO THE CODE; AND (D)WITH RESPECT TO YOUR EXERCISE
# OF ANY RIGHTS GRANTED TO YOU FOR THE CODE, CLOUDERA IS NOT LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, PUNITIVE OR
# CONSEQUENTIAL DAMAGES INCLUDING, BUT NOT LIMITED TO, DAMAGES
# RELATED TO LOST REVENUE, LOST PROFITS, LOSS OF INCOME, LOSS OF
# BUSINESS ADVANTAGE OR UNAVAILABILITY, OR LOSS OR CORRUPTION OF
# DATA.
#
# ###########################################################################
import argparse
from utils import data_utils
from utils.eval_utils import load_metrics
import numpy as np
import pandas as pd
from flask import Flask, jsonify, request, render_template
# from flask_cors import CORS, cross_origin
from models.ae import AutoencoderModel
import logging
import os
logging.basicConfig(level=logging.INFO)
def load_autoencoder():
ae_kwargs = {}
ae_kwargs["latent_dim"] = 2
ae_kwargs["hidden_dim"] = [15, 7]
ae_kwargs["epochs"] = 14
ae_kwargs["batch_size"] = 128
ae = AutoencoderModel(in_train.shape[1], **ae_kwargs)
ae.load_model()
metrics = load_metrics("metrics/" + ae.model_name + "/metrics.json")
return ae, metrics
def data_to_json(data, label):
data = pd.DataFrame(data, columns=list(col_names))
data["label"] = label
return data
# Point Flask to the front end directory
root_file_path = os.getcwd() + "/app/frontend"
print(root_file_path, os.getcwd())
# root_file_path = root_file_path.replace("backend", "frontend")
static_folder_root = os.path.join(root_file_path, "build")
print(static_folder_root)
app = Flask(__name__, static_url_path='',
static_folder=static_folder_root, template_folder=static_folder_root)
# cors = CORS(app)
app.config['CORS_HEADERS'] = 'Content-Type'
test_data_partition = "8020"
in_train, out_train, scaler, col_names = data_utils.load_kdd(
data_path="data/kdd/", dataset_type="train", partition=test_data_partition)
in_test, out_test, _, _ = data_utils.load_kdd(
data_path="data/kdd/", dataset_type="test", partition=test_data_partition, scaler=scaler)
ae, metrics = load_autoencoder()
@app.route('/')
def hello():
return render_template('index.html')
# @app.route('/build')
# def build():
# return app.send_static_file('build/index.html')
@app.route('/data')
def data():
data_size = request.args.get("n")
    data_size = 10 if data_size is None else int(data_size)
inlier_size = int(0.8 * data_size)
outlier_size = int(0.3 * data_size)
in_liers = data_to_json(
in_test[np.random.randint(5, size=inlier_size), :], 0)
out_liers = data_to_json(
out_test[np.random.randint(5, size=outlier_size), :], 1)
response = | pd.concat([in_liers, out_liers], axis=0) | pandas.concat |
# -*- coding: utf-8 -*-
"""
This file combines all data loading methods into a central location.
Each type of data has a class that retrieves, processes, and checks it.
Each class has the following methods:
get - retrieves raw data from a source
adapt - transforms from the raw data to the common processed format
check - performs some format checking to see if the processed data looks right
process - does all the above
Additionally, each class then has source specific handlers.
E.g. there might be a get_url and a get_csv for a given class
and then an adapt_phe and an adapt_hps method to format the data
If pulled from an external source (e.g. url), the raw data can be stored
by setting the config['GenerateOutput']['storeInputs'] flag to be True.
These will be stored in the data/ folder
The processed output can be stored by setting the config['GenerateOutput']['storeProcessedInputs']
flag to be true, which will store the data in processed_data/
@authors: <NAME>, <NAME>
"""
import os
import sys
import yaml
import pandas as pd
import re
import requests
import io
import json
import zipfile
from http import HTTPStatus
from bs4 import BeautifulSoup
from collections import Counter
from datetime import datetime
import pickle
import h5py
import numpy as np
from covid import data as LancsData
# import model_spec
# DTYPE = model_spec.DTYPE
DTYPE = np.float64
def CovarData(config):
# Return data and covar data structs
data = {}
data['areas'] = AreaCodeData.process(config)
data['commute'] = InterLadCommuteData.process(config)
data['cases_tidy'] = CasesData.process(config)
data['cases_wide'] = data['cases_tidy'].pivot(index="lad19cd", columns="date", values="cases")
data['mobility'] = MobilityTrendData.process(config)
data['population'] = PopulationData.process(config)
data['tier'] = TierData.process(config)
# Check dimensions are consistent
check_aligned(data)
    print('Data passes alignment check')
# put it into covar data form
covar_data = dict(
C=data['commute'].to_numpy().astype(DTYPE),
W=data['mobility'].to_numpy().astype(DTYPE),
N=data['population'].to_numpy().astype(DTYPE),
L=data['tier'].astype(DTYPE),
weekday=config['dates']['weekday'].astype(DTYPE),
)
return data, covar_data
class TierData:
def get(config):
"""
Retrieve an xarray DataArray of the tier data
"""
settings = config['TierData']
if settings['input'] == 'csv':
df = TierData.getCSV(settings['address'])
else:
invalidInput(settings['input'])
return df
def getCSV(file):
"""
Read TierData CSV from file
"""
return pd.read_csv(file)
def check(xarray, config):
"""
Check the data format
"""
return True
def adapt(df, config):
"""
Adapt the dataframe to the desired format.
"""
global_settings = config["Global"]
settings = config["TierData"]
# this key might not be stored in the config file
# if it's not, we need to grab it using AreaCodeData
if 'lad19cds' not in config:
_df = AreaCodeData.process(config)
areacodes = config["lad19cds"]
# Below is assuming inference_period dates
date_low, date_high = get_date_low_high(config)
if settings['format'].lower() == 'tidy':
xarray = TierData.adapt_xarray(df, date_low, date_high, areacodes, settings)
return xarray
def adapt_xarray(tiers, date_low, date_high, lads, settings):
"""
Adapt to a filtered xarray object
"""
tiers["date"] = pd.to_datetime(tiers["date"], format="%Y-%m-%d")
tiers["code"] = merge_lad_codes(tiers["code"])
# Separate out December tiers
date_mask = tiers["date"] > np.datetime64("2020-12-02")
tiers.loc[
date_mask & (tiers["tier"] == "three"),
"tier",
] = "dec_three"
tiers.loc[
date_mask & (tiers["tier"] == "two"),
"tier",
] = "dec_two"
tiers.loc[
date_mask & (tiers["tier"] == "one"),
"tier",
] = "dec_one"
# filter down to the lads
if len(lads) > 0:
tiers = tiers[tiers.code.isin(lads)]
# add in fake LADs to ensure all lockdown tiers are present for filtering
# xarray.loc does not like it when the values aren't present
# this seems to be the cleanest way
# we drop TESTLAD after filtering down
#lockdown_states = ["two", "three", "dec_two", "dec_three"]
lockdown_states = settings['lockdown_states']
for (i, t) in enumerate(lockdown_states):
tiers.loc[tiers.shape[0]+i+1] = ['TESTLAD','TEST','LAD',date_low,t]
index = pd.MultiIndex.from_frame(tiers[["date", "code", "tier"]])
index = index.sort_values()
index = index[~index.duplicated()]
ser = | pd.Series(1.0, index=index, name="value") | pandas.Series |
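# --- Illustrative sketch (not part of the original module) -------------------
# The row above is cut off after `ser` is built, so the rest of adapt_xarray is
# not shown. A common way to consume a Series of 1.0 values indexed by
# (date, code, tier) is to expand it into a dense 0/1 indicator cube with
# xarray; the sketch below shows that pattern on toy data and is only an
# assumption about the downstream usage, not the original implementation.
def _tier_indicator_sketch():
    import numpy as np
    import pandas as pd
    import xarray as xr

    index = pd.MultiIndex.from_tuples(
        [
            (np.datetime64("2020-11-20"), "E06000001", "two"),
            (np.datetime64("2020-12-05"), "E06000002", "dec_three"),
        ],
        names=["date", "code", "tier"],
    )
    ser = pd.Series(1.0, index=index, name="value")
    # Missing (date, code, tier) combinations become NaN; fillna(0) yields a
    # dense indicator array over date x code x tier.
    return xr.DataArray.from_series(ser).fillna(0.0)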
'''
'''
from importlib.util import find_spec
import numpy as np
import pandas as pd
from . import __validation as valid
from .__validation import ValidationError
def prep_X(data):
""" Ensures that data are in the correct format
Returns:
pd.DataFrame: formatted "X" data (test data)
"""
if not isinstance(data, pd.DataFrame):
if isinstance(data, pd.Series):
X = pd.DataFrame(data, columns=[data.name])
else:
X = | pd.DataFrame(data, columns=['X']) | pandas.DataFrame |
# --------------
import pandas as pd
import scipy.stats as stats
import math
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#Sample_Size
sample_size=2000
#Z_Critical Score
z_critical = stats.norm.ppf(q = 0.95)
# path [File location variable]
data=pd.read_csv(path)
#Code starts here
data_sample=data.sample(n=sample_size,random_state=0)
sample_mean=data_sample['installment'].mean()
sample_std=data_sample['installment'].std()
margin_of_error=z_critical*(sample_std/(sample_size**(1/2)))
confidence_interval=(sample_mean-margin_of_error,sample_mean+margin_of_error)
true_mean=data['installment'].mean()
if (true_mean in confidence_interval ):
print('Yes')
else:
print('No')
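# --- Illustrative cross-check (not part of the original exercise) ------------
# The margin of error above is z * (s / sqrt(n)). Note that stats.norm.ppf(q=0.95)
# is the critical value for a 90% two-sided interval (5% in each tail); the same
# interval can be obtained from scipy's interval helper, as sketched below on
# hypothetical summary numbers.
def _ci_cross_check_sketch(sample_mean=320.0, sample_std=200.0, n=2000):
    import math
    import scipy.stats as stats

    z = stats.norm.ppf(q=0.95)
    margin = z * (sample_std / math.sqrt(n))
    manual = (sample_mean - margin, sample_mean + margin)
    scipy_ci = stats.norm.interval(0.90, loc=sample_mean, scale=sample_std / math.sqrt(n))
    return manual, scipy_ci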
# --------------
import matplotlib.pyplot as plt
import numpy as np
#Different sample sizes to take
sample_size=np.array([20,50,100])
#Code starts here
#Creating different subplots
fig,axes=plt.subplots(3,1, figsize=(10,20))
#Running loop to iterate through rows
for i in range(len(sample_size)):
#Initialising a list
m=[]
#Loop to implement the no. of samples
for j in range(1000):
#Finding mean of a random sample
mean=data['installment'].sample(sample_size[i]).mean()
#Appending the mean to the list
m.append(mean)
#Converting the list to series
mean_series=pd.Series(m)
#Plotting the histogram for the series
    axes[i].hist(mean_series, density=True)  # 'normed' was removed from matplotlib; 'density' is the equivalent argument
#Displaying the plot
plt.show()
#Code ends here
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
data['int.rate']= data['int.rate'].map(lambda x: x.strip('%')).astype('float64')
data['int.rate'] = data['int.rate'] / 100
z_statistic, p_value = ztest(x1=data[data['purpose']=='small_business']['int.rate'], value=data['int.rate'].mean(), alternative='larger')
print("z_statistic", z_statistic)
print("p_value", p_value)
# --------------
#Importing header files
from statsmodels.stats.weightstats import ztest
#Code starts here
z_statistic, p_value = ztest(x1=data[data['paid.back.loan']=='No']['installment'], x2=data[data['paid.back.loan']=='Yes']['installment'])
print(p_value)
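# --- Illustrative cross-check (not part of the original exercise) ------------
# With its default usevar='pooled', statsmodels' two-sample ztest computes
# z = (mean1 - mean2) / sqrt(s_pooled^2 * (1/n1 + 1/n2)), where s_pooled^2 is
# the pooled sample variance. The helper below recomputes that by hand on two
# arrays and is only a sketch for illustration.
def _two_sample_z_sketch(x1, x2):
    import numpy as np
    from scipy.stats import norm

    n1, n2 = len(x1), len(x2)
    s1, s2 = np.var(x1, ddof=1), np.var(x2, ddof=1)
    s_pooled = ((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2)
    z = (np.mean(x1) - np.mean(x2)) / np.sqrt(s_pooled * (1 / n1 + 1 / n2))
    p = 2 * norm.sf(abs(z))  # two-sided p-value
    return z, p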
# --------------
#Importing header files
from scipy.stats import chi2_contingency
#Critical value
critical_value = stats.chi2.ppf(q=0.95,  # Find the critical value for 95% confidence
                                df=6)    # df = number of variable categories (in purpose) - 1
#Code starts here
yes = data[data['paid.back.loan'] == 'Yes']['purpose'].value_counts()
no = data[data['paid.back.loan'] == 'No']['purpose'].value_counts()
observed = | pd.concat([yes.T, no.T], axis=1, keys=['Yes', 'No']) | pandas.concat |
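# --- Illustrative sketch (not part of the original exercise) -----------------
# The row above stops at the construction of the observed contingency table.
# A typical next step (an assumption here, not the graded answer) is to run the
# chi-square test of independence and compare the statistic with the critical
# value computed earlier; missing categories, if any, should be filled with 0
# before testing.
def _chi_square_sketch(observed, critical_value):
    from scipy.stats import chi2_contingency

    chi2_stat, p_value, dof, expected = chi2_contingency(observed.fillna(0))
    reject_null = chi2_stat >= critical_value
    return chi2_stat, p_value, reject_null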
import numpy as np
import pandas as pd
from anndata import AnnData
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.colors import rgb2hex
from matplotlib.patches import ConnectionPatch
from typing import Union, Optional
from scanpy.plotting._utils import savefig_or_show
from scFates.plot.dendrogram import dendrogram
from adjustText import adjust_text
from matplotlib import gridspec
from sklearn.metrics import pairwise_distances
from .. import get
def slide_cors(
adata: AnnData,
root_milestone,
milestones,
col: Union[None, list] = None,
basis: str = "umap",
win_keep: Union[None, list] = None,
frame_emb: bool = True,
focus=None,
top_focus=4,
labels: Union[None, tuple] = None,
fig_height: float = 6,
fontsize: int = 16,
fontsize_focus: int = 18,
point_size: int = 20,
show: Optional[bool] = None,
save: Union[str, bool, None] = None,
kwargs_text: dict = {},
kwargs_con: dict = {},
kwargs_adjust: dict = {},
**kwargs,
):
"""\
Plot results generated from tl.slide_cors.
Parameters
----------
adata
Annotated data matrix.
root_milestone
tip defining progenitor branch.
milestones
tips defining the progenies branches.
genesetA
plot correlation with custom geneset.
genesetB
plot correlation with custom geneset.
col
specify color for the two modules, by default according to their respective milestones.
basis
Name of the `obsm` basis to use.
win_keep
plot only a subset of windows.
frame_emb
add frame around emb plot.
focus
add on the right side a scatter focusing one defined window.
top_focus
highlight n top markers for each module, having the greatest distance to 0,0 coordinates.
labels
labels defining the two modules, named after the milestones if None, or 'A' and 'B' if less than two milestones is used.
fig_height
figure height.
fontsize
repulsion score font size.
fontsize_focus
fontsize of x and y labels in focus plot.
point_size
correlation plot point size.
show
show the plot.
save
save the plot.
kwargs_text
parameters for the text annotation of the labels.
kwargs_con
parameters passed on the ConnectionPatch linking the focused plot to the rest.
kwargs_adjust
parameters passed to adjust_text.
**kwargs
if `basis=dendro`, arguments passed to :func:`scFates.pl.dendrogram`
Returns
-------
If `show==False` a matrix of :class:`~matplotlib.axes.Axes`
"""
if "milestones_colors" not in adata.uns or len(adata.uns["milestones_colors"]) == 1:
from . import palette_tools
palette_tools._set_default_colors_for_categorical_obs(adata, "milestones")
mlsc = np.array(adata.uns["milestones_colors"].copy())
if mlsc.dtype == "float":
mlsc = list(map(rgb2hex, mlsc))
name = root_milestone + "->" + "<>".join(milestones)
freqs = adata.uns[name]["cell_freq"]
nwin = len(freqs)
corAB = adata.uns[name]["corAB"].copy()
if len(milestones) > 1:
genesetA = corAB[milestones[0]]["genesetA"].index
genesetB = corAB[milestones[0]]["genesetB"].index
corA = pd.concat(adata.uns[name]["corAB"][milestones[0]])
corB = pd.concat(adata.uns[name]["corAB"][milestones[1]])
if labels is None:
labelA, labelB = milestones if labels is None else labels
else:
genesetA = corAB["A"]["genesetA"].index
genesetB = corAB["A"]["genesetB"].index
corA = pd.concat(adata.uns[name]["corAB"]["A"])
corB = | pd.concat(adata.uns[name]["corAB"]["B"]) | pandas.concat |
# AUTOGENERATED! DO NOT EDIT! File to edit: DataPipelineNotebooks/3.PrepMLData.ipynb (unless otherwise specified).
__all__ = ['PrepML']
# Cell
import xarray as xr
import numpy as np
import pandas as pd
from joblib import Parallel, delayed
import time
from functools import partial
import datetime
import os
import pickle
# Cell
class PrepML:
def __init__(self, data_root, interpolate=1, date_start='2015-11-01', date_end='2020-04-30', date_train_test_cutoff='2019-11-01'):
"""
Initialize the class
Keyword Arguments
data_root: the root path of the data folders which contains the 4.GFSFiltered1xInterpolationZarr
interpolate: the amount of interpolation applied in in the previous ParseGFS notebook (used for finding the correct input/output paths)
date_start: Earlist date to include in label set (default: '2015-11-01')
date_end: Latest date to include in label set (default: '2020-04-30')
date_train_test_cutoff: Date to use as a cutoff between the train and test labels (default: '2019-11-01')
"""
self.data_root = data_root
self.interpolation = interpolate
self.date_start = date_start
self.date_end = date_end
self.date_train_test_cutoff = date_train_test_cutoff
self.nc_path = data_root + '/3.GFSFiltered'+ str(self.interpolation) + 'xInterpolation/'
self.processed_path = data_root + '/4.GFSFiltered'+ str(self.interpolation) + 'xInterpolationZarr/'
self.path_to_labels = data_root + 'CleanedForecastsNWAC_CAIC_UAC.V1.2013-2020.csv'
self.ml_path = data_root + '/5.MLData'
self.date_col = 'Day1Date'
self.region_col = 'UnifiedRegion'
self.parsed_date_col = 'parsed_date'
if not os.path.exists(self.ml_path):
os.makedirs(self.ml_path)
#map states to regions for purposes of data lookup
self.regions = {
'Utah': ['Abajos', 'Logan', 'Moab', 'Ogden', 'Provo',
'Salt Lake', 'Skyline', 'Uintas'],
'Colorado': ['Grand Mesa Zone', 'Sangre de Cristo Range', 'Steamboat Zone', 'Front Range Zone',
'Vail Summit Zone', 'Sawatch Zone', 'Aspen Zone',
'North San Juan Mountains', 'South San Juan Mountains', 'Gunnison Zone'],
'Washington': ['Mt Hood', 'Olympics', 'Snoqualmie Pass', 'Stevens Pass',
'WA Cascades East, Central', 'WA Cascades East, North', 'WA Cascades East, South',
'WA Cascades West, Central', 'WA Cascades West, Mt Baker', 'WA Cascades West, South'
]
}
@staticmethod
def lookup_forecast_region(label_region):
"""
mapping between region names as the labels and the forecasts have slightly different standards
TODO: could add a unified mapping upstream in parseGFS files or in the label generation
Keyword Arguments:
label_region: region as defined in the labels file
returns the region as defined in the features
"""
if label_region == 'Mt Hood':
return 'Mt Hood'
elif label_region == 'Olympics':
return 'Olympics'
elif label_region == 'Cascade Pass - Snoq. Pass':
return 'Snoqualmie Pass'
elif label_region == 'Cascade Pass - Stevens Pass':
return 'Stevens Pass'
elif label_region == 'Cascade East - Central':
return 'WA Cascades East, Central'
elif label_region == 'Cascade East - North':
return 'WA Cascades East, North'
elif label_region == 'Cascade East - South':
return 'WA Cascades East, South'
elif label_region == 'Cascade West - Central':
return 'WA Cascades West, Central'
elif label_region == 'Cascade West - North':
return 'WA Cascades West, Mt Baker'
elif label_region == 'Cascade West - South':
return 'WA Cascades West, South'
elif label_region == 'Abajo':
return 'Abajos'
elif label_region == 'Logan':
return 'Logan'
elif label_region == 'Moab':
return 'Moab'
elif label_region == 'Ogden':
return 'Ogden'
elif label_region == 'Provo':
return 'Provo'
elif label_region == 'Salt Lake':
return 'Salt Lake'
elif label_region == 'Skyline':
return 'Skyline'
elif label_region == 'Uintas':
return 'Uintas'
elif label_region == 'Grand Mesa':
return 'Grand Mesa Zone'
elif label_region == 'Sangre de Cristo':
return 'Sangre de Cristo Range'
elif label_region == 'Steamboat & Flat Tops':
return 'Steamboat Zone'
elif label_region == 'Front Range':
return 'Front Range Zone'
elif label_region == 'Vail & Summit County':
return 'Vail Summit Zone'
elif label_region == 'Sawatch Range':
return 'Sawatch Zone'
elif label_region == 'Aspen':
return 'Aspen Zone'
elif label_region == 'Northern San Juan':
return 'North San Juan Mountains'
elif label_region == 'Southern San Juan':
return 'South San Juan Mountains'
elif label_region == 'Gunnison':
return 'Gunnison Zone'
else:
return 'Got region ' + label_region + ' but its an unknown region'
@staticmethod
def date_to_season(d):
"""
mapping of date to season
Keyword Arguments
d: datetime64
returns season indicator
"""
if d >= np.datetime64('2014-11-01') and d <= np.datetime64('2015-04-30'):
return (np.datetime64('2014-11-01'), '14-15')
elif d >= np.datetime64('2015-11-01') and d <= np.datetime64('2016-04-30'):
return (np.datetime64('2015-11-01'), '15-16')
elif d >= np.datetime64('2016-11-01') and d <= np.datetime64('2017-04-30'):
return (np.datetime64('2016-11-01'), '16-17')
elif d >= np.datetime64('2017-11-01') and d <= np.datetime64('2018-04-30'):
return (np.datetime64('2017-11-01'), '17-18')
elif d >= np.datetime64('2018-11-01') and d <= np.datetime64('2019-04-30'):
return (np.datetime64('2018-11-01'), '18-19')
elif d >= np.datetime64('2019-11-01') and d <= np.datetime64('2020-04-30'):
return (np.datetime64('2019-11-01'), '19-20')
else:
#print('Unknown season ' + str(d))
return (None,'Unknown')
def get_state_for_region(self, region):
"""
Returns the state for a given region
        Keyword Arguments
region: region we want to lookup the state for
"""
for k in self.regions.keys():
if region in self.regions[k]:
return k
raise Exception('No region with name ' + region)
def prep_labels(self, overwrite_cache=True):
"""
        Preps the data and label sets into two sets, train & test
        Keyword Arguments
        overwrite_cache: True indicates we want to recalculate the lat/lon combos, False indicates use the values if they exist in the cache file (otherwise calculate and cache it)
returns the train & test sets
"""
#find the season
nc_date = np.datetime64(self.date_start)
nc_season = PrepML.date_to_season(nc_date)[1]
#maintaining this as a dict since the arrays are ragged and its more efficient this way
#storing one sample for each region to get the lat/lon layout
region_zones = []
region_data = {}
for region in self.regions.keys():
for r in self.regions[region]:
region_zones.append(r)
region_data[r] = xr.open_dataset(self.nc_path + nc_season + '/Region_' + r + '_' + pd.to_datetime(nc_date).strftime('%Y%m%d') + '.nc')
#Read in all the label data
self.labels = pd.read_csv(self.path_to_labels, low_memory=False,
dtype={'Day1Danger_OctagonAboveTreelineEast': 'object',
'Day1Danger_OctagonAboveTreelineNorth': 'object',
'Day1Danger_OctagonAboveTreelineNorthEast': 'object',
'Day1Danger_OctagonAboveTreelineNorthWest': 'object',
'Day1Danger_OctagonAboveTreelineSouth': 'object',
'Day1Danger_OctagonAboveTreelineSouthEast': 'object',
'Day1Danger_OctagonAboveTreelineSouthWest': 'object',
'Day1Danger_OctagonAboveTreelineWest': 'object',
'Day1Danger_OctagonBelowTreelineEast': 'object',
'Day1Danger_OctagonBelowTreelineNorth': 'object',
'Day1Danger_OctagonBelowTreelineNorthEast': 'object',
'Day1Danger_OctagonBelowTreelineNorthWest': 'object',
'Day1Danger_OctagonBelowTreelineSouth': 'object',
'Day1Danger_OctagonBelowTreelineSouthEast': 'object',
'Day1Danger_OctagonBelowTreelineSouthWest': 'object',
'Day1Danger_OctagonBelowTreelineWest': 'object',
'Day1Danger_OctagonNearTreelineEast': 'object',
'Day1Danger_OctagonNearTreelineNorth': 'object',
'Day1Danger_OctagonNearTreelineNorthEast': 'object',
'Day1Danger_OctagonNearTreelineNorthWest': 'object',
'Day1Danger_OctagonNearTreelineSouth': 'object',
'Day1Danger_OctagonNearTreelineSouthEast': 'object',
'Day1Danger_OctagonNearTreelineSouthWest': 'object',
'Day1Danger_OctagonNearTreelineWest': 'object',
'SpecialStatement': 'object',
'image_paths': 'object',
'image_types': 'object',
'image_urls': 'object'})
self.labels['parsed_date'] = pd.to_datetime(self.labels[self.date_col], format='%Y%m%d')
metadata_cols = [self.date_col, self.region_col]
#ensure we are only using label data for regions we are looking at
#return region_zones
self.labels[self.region_col] = self.labels.apply(lambda x : PrepML.lookup_forecast_region(x[self.region_col]), axis=1)
self.labels = self.labels[self.labels[self.region_col].isin(region_zones)]
self.labels = self.labels[self.labels[self.region_col]!='Unknown region']
#add a season column
tmp = pd.DataFrame.from_records(self.labels[self.parsed_date_col].apply(PrepML.date_to_season).reset_index(drop=True))
self.labels.reset_index(drop=True, inplace=True)
self.labels['season'] = tmp[1]
#some region/seasons have excessive errors in the data, remove those
self.labels = self.labels[self.labels['season'].isin(['15-16', '16-17', '17-18', '18-19', '19-20'])]
self.labels = self.labels[~self.labels.index.isin(self.labels[(self.labels['season']=='15-16') & (self.labels[self.region_col]=='Steamboat Zone')].index)]
self.labels = self.labels[~self.labels.index.isin(self.labels[(self.labels['season']=='16-17') & (self.labels[self.region_col]=='Front Range Zone')].index)]
lat_lon_union = pd.DataFrame()
lat_lon_path = self.processed_path + 'lat_lon_union.csv'
if overwrite_cache or not os.path.exists(lat_lon_path):
            #find the union of all lat/lon/region combinations, keeping just the grid cells that contain values
            #the process to filter the lat/lon is expensive but we need to do it here (1-5 seconds per region)
            #as this helps the batch process select relevant data
for r in region_data.keys():
print(r)
region_df = region_data[r].stack(lat_lon = ('latitude', 'longitude')).lat_lon.to_dataframe()
tmp_df = pd.DataFrame.from_records(region_df['lat_lon'], columns=['latitude', 'longitude'])
indexes_to_drop = []
for index, row in tmp_df.iterrows():
#TODO: there might be a more efficient way than doing this one by one?
if 0 == np.count_nonzero(region_data[r].to_array().sel(latitude=row['latitude'], longitude=row['longitude']).stack(time_var = ('time', 'variable')).dropna(dim='time_var', how='all').values):
indexes_to_drop.append(index)
tmp_df.drop(indexes_to_drop, axis=0, inplace=True)
tmp_df[self.region_col] = r
lat_lon_union = pd.concat([lat_lon_union, tmp_df])
#cache the data
lat_lon_union.to_csv(lat_lon_path)
else:
#load the cached data
lat_lon_union = pd.read_csv(lat_lon_path,float_precision='round_trip')
#join in with the labels so we have a label per lat/lon pair
lat_lon_union = lat_lon_union.set_index(self.region_col, drop=False).join(self.labels.set_index(self.region_col, drop=False), how='left', lsuffix='left', rsuffix='right')
#define the split between train and test
date_min = np.datetime64(self.date_start)
date_max = np.datetime64(self.date_end)
train_date_cutoff = np.datetime64(self.date_train_test_cutoff)
#split the train/test data
labels_data_union = lat_lon_union[lat_lon_union[self.parsed_date_col] >= date_min]
labels_data_union = labels_data_union[labels_data_union[self.parsed_date_col] <= date_max]
#copy so we can delete the overall data and only keep the filtered
labels_data_train = labels_data_union[labels_data_union[self.parsed_date_col] <= train_date_cutoff].copy()
labels_data_test = labels_data_union[labels_data_union[self.parsed_date_col] > train_date_cutoff].copy()
labels_data_train.reset_index(inplace=True)
labels_data_test.reset_index(inplace=True)
return labels_data_train, labels_data_test
def augment_labels_with_trends(self, label_to_add_trend_info='Day1DangerAboveTreelineValue'):
raise NotImplementedError('Method is not fully implemented or tested')
#add extra labels which also allow us to have labels which indicate the trend in the avy direction
#the thought here is that predicting a rise or flat danger is usually easier than predicting when
        #to lower the danger, so separating these into separate classes
#TODO: this should be dynamic based on label passed in, not hard coded to above treeline
labels_trends = pd.DataFrame()
for r in self.labels[self.region_col].unique():
for s in self.labels['season'].unique():
region_season_df = self.labels[self.labels['season']==s]
region_season_df = region_season_df[region_season_df[self.region_col]==r]
if(len(region_season_df) == 0):
continue
region_season_df.sort_values(by='parsed_date', inplace=True)
region_season_df.reset_index(inplace=True, drop=True)
region_season_df[label_to_add_trend_info] = region_season_df['Day1DangerAboveTreeline'].map({'Low':0, 'Moderate':1, 'Considerable':2, 'High':3})
region_season_df.loc[0,'Day1DangerAboveTreelineWithTrend'] = region_season_df.iloc[0]['Day1DangerAboveTreeline'] + '_Initial'
for i in range(1,len(region_season_df)):
prev = region_season_df.iloc[i-1]['Day1DangerAboveTreelineValue']
cur = region_season_df.loc[i,'Day1DangerAboveTreelineValue']
trend = '_Unknown'
if prev == cur:
trend = '_Flat'
elif prev < cur:
trend = '_Rising'
elif prev > cur:
trend = '_Falling'
region_season_df.loc[i,'Day1DangerAboveTreelineWithTrend'] = region_season_df.iloc[i]['Day1DangerAboveTreeline'] + trend
labels_trends = pd.concat([labels_trends,region_season_df])
assert(len(labels_trends)==len(self.labels))
self.labels = labels_trends
def get_data_zarr(self, region, lat, lon, lookback_days, date, variables=None):
"""
utility to get data for a specific point
Keyword Arguments
region: the region the point exists in
lat: the latitude of the point to lookup
lon: the longitude of the point to lookup
lookback_days: the number of days prior to the date to also return
date: the date which marks the end of the dataset (same date as the desired label)
variables: filter to just these variables (default: None indicates return all variables)
"""
#print(region + ' ' + str(lat) + ', ' + str(lon) + ' ' + str(date))
state = self.get_state_for_region(region)
earliest_data, season = PrepML.date_to_season(date)
path = self.processed_path + '/' + season + '/' + state + '/Region_' + region + '.zarr'
#print('*Opening file ' + path)
tmp_ds = xr.open_zarr(path, consolidated=True)
#filter to just the variables we want
#TODO: this may be more efficient if we use the open_zarr drop to not even read the variables
if variables is not None:
tmp_ds = tmp_ds.sel(variable=tmp_ds.variable.isin(variables))
start_day = date - np.timedelta64(lookback_days-1, 'D')
#print('start day ' + str(start_day))
tmp_ds = tmp_ds.sel(latitude=lat, longitude=lon, method='nearest').sel(time=slice(start_day, date))
date_values_pd = pd.date_range(start_day, periods=lookback_days, freq='D')
#reindex should fill missing values with NA
tmp_ds = tmp_ds.reindex({'time': date_values_pd})
tmp_ds = tmp_ds.reset_index(dims_or_levels='time', drop=True).load()
return tmp_ds
def get_data_zarr_batch(self, region, season, df, lookback_days, variables=None):
"""
utility to get data for a set of points
Keyword Arguments
region: the region the point exists in
season: the season the data is in
df: DataFrame of label rows to pull the data for (should all be from the same region and season)
lookback_days: the number of days prior to the date to also return
variables: filter to just these variables (default: None indicates return all variables)
"""
state = self.get_state_for_region(region)
path = self.processed_path + '/' + season + '/' + state + '/Region_' + region + '.zarr'
#print('*Opening file ' + path)
tmp_ds = xr.open_zarr(path, consolidated=True)
#finds the minimal set of values for the single zarr collection and then appends
#the individaul data to results
results = []
lats = df['latitude'].unique()
lons = df['longitude'].unique()
tmp_ds = xr.open_zarr(path, consolidated=True)
min_ds = tmp_ds.sel(latitude=lats, longitude=lons)
#filter to just the variables we want
#TODO: this may be more efficient if we use the open_zarr drop to not even read the variables
if variables is not None:
min_ds = min_ds.sel(variable=min_ds.variable.isin(variables))
for d in df.iterrows():
d = d[1]
date = d['parsed_date']
start_day = date - np.timedelta64(lookback_days-1, 'D')
result_df = min_ds.sel(latitude=d['latitude'], longitude=d['longitude']).sel(time=slice(start_day, date))
#print(str(d['latitude']) + ' ' + str(d['longitude']) + ' ' + str(start_day) + ' ' + str(date))
#return result_df
date_values_pd = pd.date_range(start_day, periods=lookback_days, freq='D')
#reindex should fill missing values with NA
result_df = result_df.reindex({'time': date_values_pd})
result_df = result_df.assign_coords({'sample': date.strftime('%Y%m%d') + ' ' + region}).expand_dims('sample')
results.append(result_df.reset_index(dims_or_levels='time', drop=True).load())
return results
def process_sample(self, iter_tuple, lookback_days, variables=None):
"""
        Convenience method to take a tuple and pull the data for it from the zarr files
        Keyword Arguments
        iter_tuple: (index, row) tuple from DataFrame.iterrows() containing the label row
lookback_days: the number of days prior to the date to also return
"""
row = iter_tuple[1]
d = row[self.parsed_date_col]
print('date : ' + str(d))
lat = row['latitude']
print('lat ' + str(lat))
lon = row['longitude']
print('lon ' + str(lon))
reg = row[self.region_col]
print('reg ' + reg)
ds = self.get_data_zarr(reg, lat, lon, lookback_days, d, variables)
#print("actual data")
if ds.time.shape[0] != lookback_days:
print(ds)
print('Need to drop! Error, incorrect shape ' + str(ds.time.shape[0]) + ' on time ' + str(d))
return (ds)
def process_sample2(self, iter_tuple, df, lookback_days, variables):
region = iter_tuple[1]['UnifiedRegion']
season = iter_tuple[1]['season']
df_r = df[df['UnifiedRegion']==region]
df_r_s = df_r[df_r['season']==season]
return self.get_data_zarr_batch(region=region,
season=season,
df=df_r_s,
lookback_days=lookback_days,
variables=variables)
def get_xr_batch(self,
labels,
lookback_days=14,
batch_size=64,
y_column='Day1DangerAboveTreeline',
label_values=['Low', 'Moderate', 'Considerable', 'High'],
oversample={'Low':True, 'Moderate':False, 'Considerable':False, 'High':True},
random_state=1,
variables = None,
n_jobs=-1):
"""
Primary method to take a set of labels and pull the data for it
the data is large so generally this needs to be done it batches
and then stored on disk
For a set of labels and a target column from the labels set create the ML data
Keyword Arguments
labels: the set of labels we will randomly choose from
lookback_days: the number of days prior to the date in the label to also return which defines the timeseries (default: 14)
batch_size: the size of the data batch to return (default: 64)
y_column: the column in the label set to use as the label (default: Day1DangerAboveTreeline)
label_values: possible values for the y label (default: ['Low', 'Moderate', 'Considerable', 'High'])
oversample: dictionary defining which labels from the label_values set to apply naive oversampling to (default: {'Low':True, 'Moderate':False, 'Considerable':False, 'High':True})
random_state: define a state to force datasets to be returned in a reproducable fashion (deafault: 1)
        variables: variables to include (default: None which indicates include all variables)
n_jobs: number of processes to use (default: -1)
"""
print('Getting a batch for label ' + y_column)
labels_data = labels
X = None
y = None
first = True
first_y = True
num_in_place = 0
error_files = []
while num_in_place < batch_size:
if not first:
#if we didn't meet the full batch size
#continue appending until its full
#if num_in_place % 5 == 0:
print('Filling remaining have ' + str(num_in_place))
sample_size = batch_size-num_in_place
if sample_size < len(label_values):
sample_size = len(label_values)
else:
sample_size = batch_size
batch_lookups = []
size = int(sample_size/len(label_values))
#copy this so we can modify label_values during the loop without affecting the iteration
label_iter = label_values.copy()
for l in label_iter:
print(' on label: ' + l + ' with samplesize: ' + str(size))
label_len = len(labels_data[labels_data[y_column]==l])
print(' len: ' + str(label_len))
if label_len == 0:
#we don't have any more of this label, remove it from the list so we can continue efficiently
print(' No more values for label: ' + l)
label_values.remove(l)
continue
label_slice = labels_data[labels_data[y_column]==l]
#ensure the propose sample is larger than the available values
pick_size = size
if len(label_slice) < pick_size:
pick_size = len(label_slice)
if pick_size > 0:
batch_lookups.append(label_slice.sample(pick_size, random_state=random_state))
if not oversample[l]:
labels_data = labels_data.drop(batch_lookups[-1].index, axis=0)
#sample frac=1 causes the data to be shuffled
if len(batch_lookups) == 0:
#no more data left
break
batch_lookup = pd.concat(batch_lookups).sample(frac=1, random_state=random_state)
#print('lookup shape: ' + str(batch_lookup.shape))
batch_lookup.reset_index(inplace=True, drop=True)
print('have n_jobs ' + str(n_jobs))
tuples = batch_lookup[['UnifiedRegion', 'season']].drop_duplicates()
func = partial(self.process_sample2, df=batch_lookup, lookback_days=lookback_days, variables=variables)
data2 = Parallel(n_jobs=n_jobs)(map(delayed(func), tuples.iterrows()))
data = [item for sublist in data2 for item in sublist]
if first and len(data) > 0:
X = xr.concat(data, dim='sample')
y = batch_lookup
first = False
elif not first and len(data) > 0:
X_t = xr.concat(data, dim='sample')
X = xr.concat([X, X_t], dim='sample')#, coords='all', compat='override')
y = pd.concat([y, batch_lookup], axis=0)
num_in_place = y.shape[0]
X = X.sortby(['sample', 'latitude', 'longitude'])
y['sample'] = y['parsed_date'].dt.strftime('%Y%m%d') + ': ' + y['UnifiedRegion']
y = y.sort_values(['sample', 'latitude', 'longitude']).reset_index(drop=True)
return X, y, labels_data
@staticmethod
def prepare_batch_simple(X, y):
"""
ensure, X and y indexes are aligned
Keyword Arguments
X: The X dataframe
y: the y dataframe
"""
X = X.sortby(['sample', 'latitude', 'longitude'])
sample = y.apply(lambda row: '{}: {}'.format(row['parsed_date'], row['UnifiedRegion']), axis=1)
y['sample'] = sample
y = y.set_index(['sample', 'latitude', 'longitude'])
y.sort_index(inplace=True)
y.reset_index(drop=False, inplace=True)
return X, y
#def cache_batches(self, labels, batch_size=64, total_rows=6400, train_or_test='train', lookback_days=14, n_jobs=14):
"""
method to enable batches to be generated based on total amount of data as well as batch size
batches stores as zarr & parquet
Keyword Arguments
labels: the set of labels to choose from
batch_size: the number of samples to cache in a single batch (default: 64)
total_rows: the total number of rows to cache made up of multiple batches (default: 6400)
train_or_test: is this a train or test set--used in the file label (default: train)
lookback_days: number of days prior to the label date to include in the timeseries (default: 14)
n_jobs: number of processes to use (default: 14)
Returns: remaining labels (labels which weren't used in the dataset creation)
"""
#remaining_labels = labels
#for i in range(0, total_rows, batch_size):
# print(str(datetime.datetime.now()) + ' On ' + str(i) + ' of ' + str(total_rows))
# X, y, remaining_labels = self.get_xr_batch(remaining_labels,
# lookback_days=lookback_days,
# batch_size=batch_size,
# n_jobs=n_jobs)
# X.to_zarr(self.ml_path + 'X_' + train_or_test + '_' + str(i/batch_size) + '.zarr')
# y.to_parquet(self.ml_path + 'y_' + train_or_test + '_' + str(i/batch_size) + '.parquet')
#return remaining_labels
#def cache_batches_np(self,
# labels,
# batch_size=50,
# total_rows=10000,
# train_or_test='train',
# lookback_days=180,
# y_column='Day1DangerAboveTreeline',
# label_values=['Low', 'Moderate', 'Considerable', 'High'],
# oversample={'Low':True, 'Moderate':False, 'Considerable':False, 'High':True},
# variables = None,
# n_jobs=14):
"""
method to enable batches to be generated based on total amount of data as well as batch size
batches returned for further processing
Keyword Arguments
labels: the set of labels to choose from
batch_size: the number of samples to cache in a single batch (default: 64)
total_rows: the total number of rows to cache made up of multiple batches (default: 6400)
train_or_test: is this a train or test set--used in the file label (default: train)
lookback_days: number of days prior to the label date to include in the timeseries (default: 14)
y_column: the column in the label set to use as the label (default: Day1DangerAboveTreeline)
label_values: possible values for the y label (default: ['Low', 'Moderate', 'Considerable', 'High'])
oversample: dictionary defining which labels from the label_values set to apply naive oversampling to (default: {'Low':True, 'Moderate':False, 'Considerable':False, 'High':True})
varaibles: variables to include (default: None which indicates include all variables)
n_jobs: number of processes to use (default: 14)
Returns: tuple containing the batch *X,y) and remaining labels (labels which weren't used in the dataset creation)
"""
# remaining_labels = labels
# Xs = []
# ys = []
# for i in range(0, total_rows, batch_size):
# print(str(datetime.datetime.now()) + ' *On ' + str(i) + ' of ' + str(total_rows))
# X, y, remaining_labels = self.get_xr_batch(remaining_labels,
# lookback_days=lookback_days,
# batch_size=batch_size,
# y_column=y_column,
# label_values=label_values,
# oversample=oversample,
# variables=variables,
# n_jobs=n_jobs)
# Xs.append(X)
# ys.append(y)
#
#
# X = xr.concat(Xs, dim='sample')
# y = pd.concat(ys, axis=0)
# return PrepML.prepare_batch_simple(X, y), remaining_labels
#TODO: derive lookback_days from the input set
#TODO: only write out one y file per X file
def create_memmapped(self,
remaining_labels,
variables,
train_or_test = 'train',
num_rows = 10000,
lookback_days=180,
batch=0,
batch_size=500,
y_column='Day1DangerAboveTreeline',
label_values=['Low', 'Moderate', 'Considerable', 'High'],
oversample={'Low':True, 'Moderate':False, 'Considerable':False, 'High':True},
file_label='',
n_jobs=14):
"""
Generate a set of batches and store them in a memmapped numpy array
this is the technique used to prep data for timeseriesai notebook
Will store a single numpy X file in the ML directory as well as several y parquet files (one per batch size)
Keyword Arguments
remaining_labels: the set of labels to draw from
variables: the variables to include, required
train_or_test: is this a train or test set--used in the file label (default: train)
num_variables: number of variables in the X set (default: 1131)
num_rows: total number of rows to store in the file (deafult: 10000)
lookback_days: number of days before the label date to include in the timeseries (default: 180)
batch: batch number to start in (default: 0) used in case you are generating multiple files
batch_size: number of rows to process at once to accomodate memory limitations (default: 500)
y_column: the column in the label set to use as the label (default: Day1DangerAboveTreeline)
label_values: possible values for the y label (default: ['Low', 'Moderate', 'Considerable', 'High'])
oversample: dictionary defining which labels from the label_values set to apply naive oversampling to (default: {'Low':True, 'Moderate':False, 'Considerable':False, 'High':True})
file_label: optional label for files to distinguish different datasets
n_jobs: number of parallel jobs to run
"""
num_variables = len(variables)
# Save a small empty array
X_temp_fn = self.ml_path + '/temp_X.npy'
np.save(X_temp_fn, np.empty(1))
# Create a np.memmap with desired dtypes and shape of the large array you want to save.
# It's just a placeholder that doesn't contain any data
X_fn = self.ml_path + '/X' + train_or_test + '_batch_' + str(batch) + '_' + file_label + '_on_disk.npy'
X = np.memmap(X_temp_fn, dtype='float32', shape=(num_rows, num_variables, lookback_days))
# We are going to create a loop to fill in the np.memmap
start = 0
y = pd.DataFrame()
for i in range(0, num_rows, batch_size):
print('On ' + str(i) + ' of ' + str(num_rows))
# You now grab a chunk of your data that fits in memory
# This could come from a pandas dataframe for example
X_df, y_df, remaining_labels = self.get_xr_batch(remaining_labels,
lookback_days=lookback_days,
batch_size=batch_size,
y_column=y_column,
label_values=label_values,
oversample=oversample,
variables=variables,
n_jobs=n_jobs)
X_df, y_df = PrepML.prepare_batch_simple(X_df, y_df)
#need to make sure all the variables are in the same order (there was an issue that they weren't between train and test sets)
X_df = X_df.sortby('variable')
end = start + batch_size
print('start: ' + str(start) + ' end: ' + str(end))
#print(str(X.shape))
print(str(X_df.vars.values.shape))
print(str(batch_size))
# I now fill a slice of the np.memmap
X[start:end] = X_df.vars.values[:batch_size] #sometimes the process will add a few extras, filter them
#y_df[:batch_size].to_parquet(self.ml_path + '/y_' + train_or_test + '_batch_' + str(batch) + '_' + file_label + '_' + str(i/batch_size) + '.parquet')
y = pd.concat([y, y_df[:batch_size]])
start = end
del X_df, y_df
#I can now remove the temp file I created
os.remove(X_temp_fn)
# Once the data is loaded on the np.memmap, I save it as a normal np.array
np.save(X_fn, X)
y.to_parquet(self.ml_path + '/y_' + train_or_test + '_batch_' + str(batch) + '_' + file_label + '.parquet')
return remaining_labels, X_fn
def concat_memapped(self, to_concat_filenames, file_label='', dim_1_size=1131, dim_2_size=180, temp_destination_path=None):
"""
        concat multiple numpy files on disk into a single file
required for timeseriesai notebook as input to that is a single memmapped file containing X train and test data
Keyword Arguments:
to_concat_filenames: the files to concat
dim_1_size: number of variables in the files (default: 1131)
dim_2_size: number of lookback dates in the files (length of timeseries) (default: 180)
        file_label: optional label used in the output filename
        temp_destination_path: alternate path for the temporary memmap backing file (defaults to the ML path)
"""
if temp_destination_path is None:
temp_destination_path = self.ml_path
to_concat = []
for i in range(len(to_concat_filenames)):
to_concat.append(np.load(to_concat_filenames[i], mmap_mode='r'))
dim_0_size = 0
for i in range(len(to_concat)):
dim_0_size += to_concat[i].shape[0]
assert(to_concat[i].shape[1] == dim_1_size)
assert(to_concat[i].shape[2] == dim_2_size)
X_temp_fn = temp_destination_path + '/temp_X.npy'
np.save(X_temp_fn, np.empty(1))
X_fn = self.ml_path + '/X_all' + '_' + file_label + '.npy'
X = np.memmap(X_temp_fn, dtype='float32', shape=(dim_0_size, dim_1_size, dim_2_size))
dim_0_start = 0
for i in range(len(to_concat)):
print('On file ' + str(i) + ' of ' + str(len(to_concat)))
dim_0 = to_concat[i].shape[0]
X[dim_0_start:dim_0_start+dim_0] = to_concat[i]
dim_0_start += dim_0
#I can now remove the temp file I created
os.remove(X_temp_fn)
# Once the data is loaded on the np.memmap, I save it as a normal np.array
np.save(X_fn, X)
del to_concat
#TODO: add the ability to restart from a cached label file
def generate_train_test_local(self,
train_labels,
test_labels,
num_train_files=1,
num_test_files=1,
num_train_rows_per_file=1000,
num_test_rows_per_file=500,
batch_size=500,
y_column='Day1DangerAboveTreeline',
label_values=['Low', 'Moderate', 'Considerable', 'High'],
oversample={'Low':True, 'Moderate':False, 'Considerable':False, 'High':True},
file_label = '',
temp_destination_path=None,
n_jobs=14):
"""
create several memapped files
we do this as the technique to create one has some memory limitations
also due to the memory limitations sometimes this process runs out of memory and crashes
which is why we cache the label state after every iteration so we can restart at that state
15 mins for 10000 rows using all 16 cores on my machine
I can generate a max of ~50000 rows per batch with 48 gb of ram before running out of memory
Keyword Arguments:
y_column: the column in the label set to use as the label (default: Day1DangerAboveTreeline)
label_values: possible values for the y label (default: ['Low', 'Moderate', 'Considerable', 'High'])
oversample: dictionary defining which labels from the label_values set to apply naive oversampling to (default: {'Low':True, 'Moderate':False, 'Considerable':False, 'High':True})
file_label: optional file label to add to the on disk files to distinguish between data sets
temp_destination_path: alternate path to put the concat file temporarily (improves disk throughput)
"""
assert num_train_rows_per_file % batch_size == 0, 'num_train_rows_per_file needs to be a multiple of batch_size'
assert batch_size <= num_train_rows_per_file, 'num_train_rows_per_file needs to be greater than batch_size'
assert num_test_rows_per_file % batch_size == 0, 'num_test_rows_per_file needs to be a multiple of batch_size'
assert batch_size <= num_test_rows_per_file, 'num_test_rows_per_file needs to be greater than batch_size'
#not all seasons have the same # of variables so find the common subset first
train_seasons = train_labels['season'].unique()
test_seasons = test_labels['season'].unique()
#find the common vars for each season
#pull one sample of data for each season
data = {}
for s in train_seasons:
label = train_labels[train_labels['season'] == s].sample(n = 1)
assert(len(label==1))
data[s] = self.get_data_zarr(label.iloc[0]['UnifiedRegion'], label.iloc[0]['latitude'], label.iloc[0]['longitude'], 7, label.iloc[0]['parsed_date'])
for s in test_seasons:
label = test_labels[test_labels['season'] == s].sample(n = 1)
assert(len(label==1))
data[s] = self.get_data_zarr(label.iloc[0]['UnifiedRegion'], label.iloc[0]['latitude'], label.iloc[0]['longitude'], 7, label.iloc[0]['parsed_date'])
v = []
for d in data.keys():
v.append(set(data[d].variable.values))
final_vars = list(set.intersection(*v))
#get a sample so we can dump the feature labels
X, _, _ = self.get_xr_batch(train_labels, variables=final_vars, lookback_days=7, batch_size=4)
| pd.Series(X.variable.data) | pandas.Series |
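# --- Illustrative sketch (not part of the original class) --------------------
# The memmap pattern used by create_memmapped / concat_memapped above, reduced
# to a toy example: a small placeholder file backs a np.memmap with the final
# shape, batches are written into slices of it, and the result is then saved as
# a regular .npy array. File names, shapes and the random data are hypothetical;
# in the real pipeline each chunk comes from get_xr_batch().
def _memmap_batch_sketch(out_fn="X_demo.npy", tmp_fn="X_demo_tmp.npy",
                         n_rows=100, n_vars=4, n_days=7, batch_size=25):
    import os
    import numpy as np

    np.save(tmp_fn, np.empty(1))  # placeholder file backing the memmap
    X = np.memmap(tmp_fn, dtype="float32", shape=(n_rows, n_vars, n_days))
    for start in range(0, n_rows, batch_size):
        X[start:start + batch_size] = np.random.rand(batch_size, n_vars, n_days)
    np.save(out_fn, X)  # materialise the memmap as a normal array
    os.remove(tmp_fn)
    return out_fn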
from datetime import datetime
import numpy as np
from pandas.tseries.frequencies import get_freq_code as _gfc
from pandas.tseries.index import DatetimeIndex, Int64Index
from pandas.tseries.tools import parse_time_string
import pandas.tseries.frequencies as _freq_mod
import pandas.core.common as com
import pandas.core.datetools as datetools
from pandas._tseries import Timestamp
import pandas._tseries as lib
#---------------
# Period logic
def to_period(arg, freq=None):
""" Attempts to convert arg to timestamp """
if arg is None:
return arg
if type(arg) == float:
raise TypeError("Cannot convert a float to period")
return Period(arg, freq=freq)
class Period(object):
def __init__(self, value=None, freq=None,
year=None, month=1, quarter=None, day=1,
hour=0, minute=0, second=0):
"""
Represents a period of time
Parameters
----------
value : Period or basestring, default None
The time period represented (e.g., '4Q2005')
freq : str, default None
e.g., 'B' for businessday, ('T', 5) or '5T' for 5 minutes
year : int, default None
month : int, default 1
quarter : int, default None
day : int, default 1
hour : int, default 0
minute : int, default 0
second : int, default 0
"""
# freq points to a tuple (base, mult); base is one of the defined
# periods such as A, Q, etc. Every five minutes would be, e.g.,
# ('T', 5) but may be passed in as a string like '5T'
self.freq = None
# ordinal is the period offset from the gregorian proleptic epoch
self.ordinal = None
if value is None:
if freq is None:
raise ValueError("If value is None, freq cannot be None")
if year is None:
raise ValueError("If value is None, year cannot be None")
if quarter is not None:
month = (quarter - 1) * 3 + 1
base, mult = _gfc(freq)
self.ordinal = lib.period_ordinal(year, month, day, hour, minute,
second, base, mult)
elif isinstance(value, Period):
other = value
if freq is None or _gfc(freq) == _gfc(other.freq):
self.ordinal = other.ordinal
freq = other.freq
else:
converted = other.asfreq(freq)
self.ordinal = converted.ordinal
elif isinstance(value, basestring):
value = value.upper()
dt, parsed, reso = parse_time_string(value)
if freq is None:
if reso == 'year':
freq = 'A'
elif reso == 'quarter':
freq = 'Q'
elif reso == 'month':
freq = 'M'
elif reso == 'day':
freq = 'D'
elif reso == 'hour':
freq = 'H'
elif reso == 'minute':
freq = 'T'
elif reso == 'second':
freq = 'S'
else:
raise ValueError("Could not infer frequency for period")
elif isinstance(value, datetime):
dt = value
if freq is None:
raise ValueError('Must supply freq for datetime value')
elif isinstance(value, (int, long)):
if value <= 0:
raise ValueError("Value must be positive")
self.ordinal = value
if freq is None:
raise ValueError('Must supply freq for ordinal value')
else:
msg = "Value must be Period, string, integer, or datetime"
raise ValueError(msg)
base, mult = _gfc(freq)
if self.ordinal is None:
self.ordinal = lib.period_ordinal(dt.year, dt.month, dt.day, dt.hour,
dt.minute, dt.second, base, mult)
self.freq = _freq_mod._get_freq_str(base, mult)
def __eq__(self, other):
if isinstance(other, Period):
return (self.ordinal == other.ordinal
and _gfc(self.freq) == _gfc(other.freq))
return False
def __add__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal + other, self.freq)
raise ValueError("Cannot add with non-integer value")
def __sub__(self, other):
if isinstance(other, (int, long)):
return Period(self.ordinal - other, self.freq)
if isinstance(other, Period):
if other.freq != self.freq:
raise ValueError("Cannot do arithmetic with "
"non-conforming periods")
return self.ordinal - other.ordinal
raise ValueError("Cannot sub with non-integer value")
def asfreq(self, freq=None, how='E'):
"""
Parameters
----------
freq : string
Target period frequency (one of the pandas period frequency strings)
how : str {'E', 'S'}, default 'E'
'E' anchors on the end of the period, 'S' on the start
Returns
-------
resampled : Period
"""
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
base2, mult2 = _gfc(freq)
new_ordinal = lib.period_asfreq(self.ordinal, base1, mult1,
base2, mult2, how)
return Period(new_ordinal, (base2, mult2))
def start_time(self):
return self.to_timestamp(which_end='S')
def end_time(self):
return self.to_timestamp(which_end='E')
def to_timestamp(self, which_end='S'):
"""
Return the Timestamp at the start/end of the period
Parameters
----------
which_end: str, default 'S' (start)
'S', 'E'. Can be aliased as case insensitive
'Start', 'Finish', 'Begin', 'End'
Returns
-------
Timestamp
"""
which_end = _validate_end_alias(which_end)
new_val = self.asfreq('S', which_end)
base, mult = _gfc(new_val.freq)
return Timestamp(lib.period_ordinal_to_dt64(new_val.ordinal, base, mult))
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year(self.ordinal, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month(self.ordinal, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear(self.ordinal, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter(self.ordinal, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day(self.ordinal, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week(self.ordinal, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday(self.ordinal, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow(self.ordinal, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy(self.ordinal, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour(self.ordinal, base, mult)
@property
def minute(self):
base, mult = _gfc(self.freq)
return lib.get_period_minute(self.ordinal, base, mult)
@property
def second(self):
base, mult = _gfc(self.freq)
return lib.get_period_second(self.ordinal, base, mult)
@classmethod
def now(cls, freq=None):
return Period(datetime.now(), freq=freq)
def __repr__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
freqstr = _freq_mod._reverse_period_code_map[base]
if mult == 1:
return "Period('%s', '%s')" % (formatted, freqstr)
return ("Period('%s', '%d%s')" % (formatted, mult, freqstr))
def __str__(self):
base, mult = _gfc(self.freq)
formatted = lib.period_ordinal_to_string(self.ordinal, base, mult)
return ("%s" % formatted)
def strftime(self, fmt):
"""
Returns the string representation of the :class:`Period`, depending
on the selected :keyword:`format`. :keyword:`format` must be a string
containing one or several directives. The method recognizes the same
directives as the :func:`time.strftime` function of the standard Python
distribution, as well as the specific additional directives ``%f``,
``%F``, ``%q``. (formatting & docs originally from scikits.timeseries)
+-----------+--------------------------------+-------+
| Directive | Meaning | Notes |
+===========+================================+=======+
| ``%a`` | Locale's abbreviated weekday | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%A`` | Locale's full weekday name. | |
+-----------+--------------------------------+-------+
| ``%b`` | Locale's abbreviated month | |
| | name. | |
+-----------+--------------------------------+-------+
| ``%B`` | Locale's full month name. | |
+-----------+--------------------------------+-------+
| ``%c`` | Locale's appropriate date and | |
| | time representation. | |
+-----------+--------------------------------+-------+
| ``%d`` | Day of the month as a decimal | |
| | number [01,31]. | |
+-----------+--------------------------------+-------+
| ``%f`` | 'Fiscal' year without a | \(1) |
| | century as a decimal number | |
| | [00,99] | |
+-----------+--------------------------------+-------+
| ``%F`` | 'Fiscal' year with a century | \(2) |
| | as a decimal number | |
+-----------+--------------------------------+-------+
| ``%H`` | Hour (24-hour clock) as a | |
| | decimal number [00,23]. | |
+-----------+--------------------------------+-------+
| ``%I`` | Hour (12-hour clock) as a | |
| | decimal number [01,12]. | |
+-----------+--------------------------------+-------+
| ``%j`` | Day of the year as a decimal | |
| | number [001,366]. | |
+-----------+--------------------------------+-------+
| ``%m`` | Month as a decimal number | |
| | [01,12]. | |
+-----------+--------------------------------+-------+
| ``%M`` | Minute as a decimal number | |
| | [00,59]. | |
+-----------+--------------------------------+-------+
| ``%p`` | Locale's equivalent of either | \(3) |
| | AM or PM. | |
+-----------+--------------------------------+-------+
| ``%q`` | Quarter as a decimal number | |
| | [01,04] | |
+-----------+--------------------------------+-------+
| ``%S`` | Second as a decimal number | \(4) |
| | [00,61]. | |
+-----------+--------------------------------+-------+
| ``%U`` | Week number of the year | \(5) |
| | (Sunday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Sunday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%w`` | Weekday as a decimal number | |
| | [0(Sunday),6]. | |
+-----------+--------------------------------+-------+
| ``%W`` | Week number of the year | \(5) |
| | (Monday as the first day of | |
| | the week) as a decimal number | |
| | [00,53]. All days in a new | |
| | year preceding the first | |
| | Monday are considered to be in | |
| | week 0. | |
+-----------+--------------------------------+-------+
| ``%x`` | Locale's appropriate date | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%X`` | Locale's appropriate time | |
| | representation. | |
+-----------+--------------------------------+-------+
| ``%y`` | Year without century as a | |
| | decimal number [00,99]. | |
+-----------+--------------------------------+-------+
| ``%Y`` | Year with century as a decimal | |
| | number. | |
+-----------+--------------------------------+-------+
| ``%Z`` | Time zone name (no characters | |
| | if no time zone exists). | |
+-----------+--------------------------------+-------+
| ``%%`` | A literal ``'%'`` character. | |
+-----------+--------------------------------+-------+
.. note::
(1)
The ``%f`` directive is the same as ``%y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(2)
The ``%F`` directive is the same as ``%Y`` if the frequency is
not quarterly.
Otherwise, it corresponds to the 'fiscal' year, as defined by
the :attr:`qyear` attribute.
(3)
The ``%p`` directive only affects the output hour field
if the ``%I`` directive is used to parse the hour.
(4)
The range really is ``0`` to ``61``; this accounts for leap
seconds and the (very rare) double leap seconds.
(5)
The ``%U`` and ``%W`` directives are only used in calculations
when the day of the week and the year are specified.
.. rubric:: Examples
>>> a = Period(freq='Q@JUL', year=2006, quarter=1)
>>> a.strftime('%F-Q%q')
'2006-Q1'
>>> # Output the last month in the quarter of this date
>>> a.strftime('%b-%Y')
'Oct-2005'
>>>
>>> a = Period(freq='D', year=2001, month=1, day=1)
>>> a.strftime('%d-%b-%Y')
'01-Jan-2001'
>>> a.strftime('%b. %d, %Y was a %A')
'Jan. 01, 2001 was a Monday'
"""
base, mult = _gfc(self.freq)
if fmt is not None:
return lib.period_strftime(self.ordinal, base, mult, fmt)
else:
return lib.period_ordinal_to_string(self.ordinal, base, mult)
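# --- Illustrative sketch (not part of the original module) ---
# A few hedged usage examples for the Period class defined above, based only on its
# docstrings; exact results depend on this (early) pandas version.
def _example_period_usage():
    p = Period('4Q2005')                        # parsed from a string, freq inferred as quarterly
    q = Period(year=2006, quarter=1, freq='Q')  # built from year/quarter components
    monthly = q.asfreq('M', how='E')            # convert to the last month of the quarter
    ts = q.to_timestamp('S')                    # Timestamp at the start of the period
    return p, q, monthly, ts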
def _period_unbox(key, check=None):
'''
Period-like => int64
'''
if not isinstance(key, Period):
key = Period(key, freq=check)
elif check is not None:
if key.freq != check:
raise ValueError("%s is wrong freq" % key)
return np.int64(key.ordinal)
def _period_unbox_array(arr, check=None):
if arr is None:
return arr
unboxer = np.frompyfunc(lambda x: _period_unbox(x, check=check), 1, 1)
return unboxer(arr)
def _period_box(val, freq):
return Period(val, freq=freq)
def _period_box_array(arr, freq):
if arr is None:
return arr
if not isinstance(arr, np.ndarray):
return arr
boxfunc = lambda x: _period_box(x, freq)
boxer = np.frompyfunc(boxfunc, 1, 1)
return boxer(arr)
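# --- Illustrative note (not part of the original module) ---
# The box/unbox helpers above rely on np.frompyfunc to apply a plain Python function
# element-wise over an ndarray, which returns an object-dtype array:
# >>> import numpy as np
# >>> doubler = np.frompyfunc(lambda x: x * 2, 1, 1)
# >>> doubler(np.array([1, 2, 3]))
# array([2, 4, 6], dtype=object)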
def dt64arr_to_periodarr(data, freq):
if data is None:
return data
if isinstance(freq, basestring):
base, mult = _gfc(freq)
else:
base, mult = freq
return lib.dt64arr_to_periodarr(data.view('i8'), base, mult)
# --- Period index sketch
class PeriodIndex(Int64Index):
"""
Immutable ndarray holding ordinal values indicating regular periods in
time such as particular years, quarters, months, etc. A value of 1 is the
period containing the Gregorian proleptic datetime Jan 1, 0001 00:00:00.
This ordinal representation is from the scikits.timeseries project.
For instance,
# construct period for day 1/1/1 and get the first second
i = Period(year=1,month=1,day=1,freq='D').asfreq('S', 'S')
i.ordinal
===> 1
Index keys are boxed to Period objects which carry the metadata (e.g.,
frequency information).
Parameters
----------
data : array-like (1-dimensional), optional
Optional period-like data to construct index with
dtype : NumPy dtype (default: i8)
copy : bool
Make a copy of input ndarray
freq : string or period object, optional
One of pandas period strings or corresponding objects
start : starting value, period-like, optional
If data is None, used as the start point in generating regular
period data.
periods : int, optional, > 0
Number of periods to generate, if generating index. Takes precedence
over end argument
end : end value, period-like, optional
If periods is none, generated index will extend to first conforming
period on or just past end argument
"""
def __new__(cls, data=None,
freq=None, start=None, end=None, periods=None,
copy=False, name=None):
if isinstance(freq, Period):
freq = freq.freq
else:
freq = datetools.get_standard_freq(freq)
if data is None:
if start is None and end is None:
raise ValueError('Must specify start, end, or data')
start = to_period(start, freq)
end = to_period(end, freq)
is_start_intv = isinstance(start, Period)
is_end_intv = isinstance(end, Period)
if (start is not None and not is_start_intv):
raise ValueError('Failed to convert %s to period' % start)
if (end is not None and not is_end_intv):
raise ValueError('Failed to convert %s to period' % end)
if is_start_intv and is_end_intv and (start.freq != end.freq):
raise ValueError('Start and end must have same freq')
if freq is None:
if is_start_intv:
freq = start.freq
elif is_end_intv:
freq = end.freq
else:
raise ValueError('Could not infer freq from start/end')
if periods is not None:
if start is None:
data = np.arange(end.ordinal - periods + 1,
end.ordinal + 1,
dtype=np.int64)
else:
data = np.arange(start.ordinal, start.ordinal + periods,
dtype=np.int64)
else:
if start is None or end is None:
msg = 'Must specify both start and end if periods is None'
raise ValueError(msg)
data = np.arange(start.ordinal, end.ordinal+1, dtype=np.int64)
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
if not isinstance(data, np.ndarray):
if np.isscalar(data):
raise ValueError('PeriodIndex() must be called with a '
'collection of some kind, %s was passed'
% repr(data))
if isinstance(data, Period):
data = [data]
# other iterable of some kind
if not isinstance(data, (list, tuple)):
data = list(data)
try:
data = np.array(data, dtype='i8')
except:
data = np.array(data, dtype='O')
if freq is None:
raise ValueError('freq cannot be None')
data = _period_unbox_array(data, check=freq)
else:
if isinstance(data, PeriodIndex):
if freq is None or freq == data.freq:
freq = data.freq
data = data.values
else:
base1, mult1 = _gfc(data.freq)
base2, mult2 = _gfc(freq)
data = lib.period_asfreq_arr(data.values, base1, mult1,
base2, mult2, 'E')
else:
if freq is None:
raise ValueError('freq cannot be None')
if data.dtype == np.datetime64:
data = dt64arr_to_periodarr(data, freq)
elif data.dtype == np.int64:
pass
else:
data = data.astype('i8')
data = np.array(data, dtype=np.int64, copy=False)
if (data <= 0).any():
raise ValueError("Found illegal (<= 0) values in data")
subarr = data.view(cls)
subarr.name = name
subarr.freq = freq
return subarr
@property
def is_all_dates(self):
return True
def asfreq(self, freq=None, how='E'):
how = _validate_end_alias(how)
base1, mult1 = _gfc(self.freq)
if isinstance(freq, basestring):
base2, mult2 = _gfc(freq)
else:
base2, mult2 = freq
new_data = lib.period_asfreq_arr(self.values,
base1, mult1,
base2, mult2, how)
return PeriodIndex(new_data, freq=freq)
@property
def year(self):
base, mult = _gfc(self.freq)
return lib.get_period_year_arr(self.values, base, mult)
@property
def month(self):
base, mult = _gfc(self.freq)
return lib.get_period_month_arr(self.values, base, mult)
@property
def qyear(self):
base, mult = _gfc(self.freq)
return lib.get_period_qyear_arr(self.values, base, mult)
@property
def quarter(self):
base, mult = _gfc(self.freq)
return lib.get_period_quarter_arr(self.values, base, mult)
@property
def day(self):
base, mult = _gfc(self.freq)
return lib.get_period_day_arr(self.values, base, mult)
@property
def week(self):
base, mult = _gfc(self.freq)
return lib.get_period_week_arr(self.values, base, mult)
@property
def weekday(self):
base, mult = _gfc(self.freq)
return lib.get_period_weekday_arr(self.values, base, mult)
@property
def day_of_week(self):
base, mult = _gfc(self.freq)
return lib.get_period_dow_arr(self.values, base, mult)
@property
def day_of_year(self):
base, mult = _gfc(self.freq)
return lib.get_period_doy_arr(self.values, base, mult)
@property
def hour(self):
base, mult = _gfc(self.freq)
return lib.get_period_hour_arr(self.values, base, mult)
@property
def minute(self):
base, mult = | _gfc(self.freq) | pandas.tseries.frequencies.get_freq_code |
from flask import Flask, render_template, jsonify, request
from flask_pymongo import PyMongo
from flask_cors import CORS, cross_origin
import json
import collections
import numpy as np
import re
from numpy import array
from statistics import mode
import pandas as pd
import warnings
import copy
from joblib import Memory
from itertools import chain
import ast
import timeit
from sklearn.neighbors import KNeighborsClassifier # 1 neighbors
from sklearn.svm import SVC # 1 svm
from sklearn.naive_bayes import GaussianNB # 1 naive bayes
from sklearn.neural_network import MLPClassifier # 1 neural network
from sklearn.linear_model import LogisticRegression # 1 linear model
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis, QuadraticDiscriminantAnalysis # 2 discriminant analysis
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier, AdaBoostClassifier, GradientBoostingClassifier # 4 ensemble models
from joblib import Parallel, delayed
import multiprocessing
from sklearn.pipeline import make_pipeline
from sklearn import model_selection
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import log_loss
from sklearn.metrics import fbeta_score
from sklearn.metrics import accuracy_score
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
from sklearn.metrics import f1_score
from imblearn.metrics import geometric_mean_score
import umap
from sklearn.metrics import classification_report
from sklearn.preprocessing import scale
import eli5
from eli5.sklearn import PermutationImportance
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.feature_selection import RFE
from sklearn.decomposition import PCA
from mlxtend.classifier import StackingCVClassifier
from mlxtend.feature_selection import ColumnSelector
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import ShuffleSplit
from scipy.spatial import procrustes
# This block of code is for the connection between the server, the database, and the client (plus routing).
# Access MongoDB
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/mydb"
mongo = PyMongo(app)
cors = CORS(app, resources={r"/data/*": {"origins": "*"}})
# Retrieve data from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/Reset', methods=["GET", "POST"])
def Reset():
global DataRawLength
global DataResultsRaw
global previousState
previousState = []
global filterActionFinal
filterActionFinal = ''
global keySpecInternal
keySpecInternal = 1
global dataSpacePointsIDs
dataSpacePointsIDs = []
global previousStateActive
previousStateActive = []
global StanceTest
StanceTest = False
global status
status = True
global factors
factors = [1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,1,1,1]
global KNNModelsCount
global SVCModelsCount
global GausNBModelsCount
global MLPModelsCount
global LRModelsCount
global LDAModelsCount
global QDAModelsCount
global RFModelsCount
global ExtraTModelsCount
global AdaBModelsCount
global GradBModelsCount
global keyData
keyData = 0
KNNModelsCount = 0
SVCModelsCount = 576
GausNBModelsCount = 736
MLPModelsCount = 1236
LRModelsCount = 1356
LDAModelsCount = 1996
QDAModelsCount = 2196
RFModelsCount = 2446
ExtraTModelsCount = 2606
AdaBModelsCount = 2766
GradBModelsCount = 2926
global XData
XData = []
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global detailsParams
detailsParams = []
global algorithmList
algorithmList = []
global ClassifierIDsList
ClassifierIDsList = ''
# Initializing models
global resultsList
resultsList = []
global RetrieveModelsList
RetrieveModelsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global crossValidation
crossValidation = 5
# models
global KNNModels
KNNModels = []
global RFModels
RFModels = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
return 'The reset was done!'
# Retrieve data from client and select the correct data set
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/ServerRequest', methods=["GET", "POST"])
def RetrieveFileName():
global DataRawLength
global DataResultsRaw
global DataResultsRawTest
global DataRawLengthTest
fileName = request.get_data().decode('utf8').replace("'", '"')
global keySpecInternal
keySpecInternal = 1
global filterActionFinal
filterActionFinal = ''
global dataSpacePointsIDs
dataSpacePointsIDs = []
global RANDOM_SEED
RANDOM_SEED = 42
global keyData
keyData = 0
global XData
XData = []
global previousState
previousState = []
global previousStateActive
previousStateActive = []
global status
status = True
global yData
yData = []
global XDataStored
XDataStored = []
global yDataStored
yDataStored = []
global filterDataFinal
filterDataFinal = 'mean'
global ClassifierIDsList
ClassifierIDsList = ''
global algorithmList
algorithmList = []
global detailsParams
detailsParams = []
# Initializing models
global RetrieveModelsList
RetrieveModelsList = []
global resultsList
resultsList = []
global allParametersPerformancePerModel
allParametersPerformancePerModel = []
global all_classifiers
all_classifiers = []
global scoring
scoring = {'accuracy': 'accuracy', 'precision_micro': 'precision_micro', 'precision_macro': 'precision_macro', 'precision_weighted': 'precision_weighted', 'recall_micro': 'recall_micro', 'recall_macro': 'recall_macro', 'recall_weighted': 'recall_weighted', 'roc_auc_ovo_weighted': 'roc_auc_ovo_weighted'}
global loopFeatures
loopFeatures = 2
# models
global KNNModels
global SVCModels
global GausNBModels
global MLPModels
global LRModels
global LDAModels
global QDAModels
global RFModels
global ExtraTModels
global AdaBModels
global GradBModels
KNNModels = []
SVCModels = []
GausNBModels = []
MLPModels = []
LRModels = []
LDAModels = []
QDAModels = []
RFModels = []
ExtraTModels = []
AdaBModels = []
GradBModels = []
global results
results = []
global resultsMetrics
resultsMetrics = []
global parametersSelData
parametersSelData = []
global StanceTest
StanceTest = False
global target_names
target_names = []
global target_namesLoc
target_namesLoc = []
DataRawLength = -1
DataRawLengthTest = -1
data = json.loads(fileName)
if data['fileName'] == 'HeartC':
CollectionDB = mongo.db.HeartC.find()
elif data['fileName'] == 'StanceC':
StanceTest = True
CollectionDB = mongo.db.StanceC.find()
CollectionDBTest = mongo.db.StanceCTest.find()
elif data['fileName'] == 'DiabetesC':
CollectionDB = mongo.db.diabetesC.find()
elif data['fileName'] == 'BreastC':
CollectionDB = mongo.db.breastC.find()
elif data['fileName'] == 'WineC':
CollectionDB = mongo.db.WineC.find()
elif data['fileName'] == 'ContraceptiveC':
CollectionDB = mongo.db.ContraceptiveC.find()
elif data['fileName'] == 'VehicleC':
CollectionDB = mongo.db.VehicleC.find()
elif data['fileName'] == 'BiodegC':
StanceTest = True
CollectionDB = mongo.db.biodegC.find()
CollectionDBTest = mongo.db.biodegCTest.find()
else:
CollectionDB = mongo.db.IrisC.find()
DataResultsRaw = []
for index, item in enumerate(CollectionDB):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRaw.append(item)
DataRawLength = len(DataResultsRaw)
DataResultsRawTest = []
if (StanceTest):
for index, item in enumerate(CollectionDBTest):
item['_id'] = str(item['_id'])
item['InstanceID'] = index
DataResultsRawTest.append(item)
DataRawLengthTest = len(DataResultsRawTest)
DataSetSelection()
return 'Everything is okay'
def Convert(lst):
it = iter(lst)
res_dct = dict(zip(it, it))
return res_dct
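# e.g. Convert(['alpha', 1, 'beta', 2]) pairs consecutive items into {'alpha': 1, 'beta': 2};
# a list with an odd number of items silently drops the trailing element because zip stops early.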
# Retrieve data set from client
@cross_origin(origin='localhost',headers=['Content-Type','Authorization'])
@app.route('/data/SendtoSeverDataSet', methods=["GET", "POST"])
def SendToServerData():
uploadedData = request.get_data().decode('utf8').replace("'", '"')
uploadedDataParsed = json.loads(uploadedData)
DataResultsRaw = uploadedDataParsed['uploadedData']
DataResults = copy.deepcopy(DataResultsRaw)
for dictionary in DataResultsRaw:
for key in dictionary.keys():
if (key.find('*') != -1):
target = key
continue
continue
DataResultsRaw.sort(key=lambda x: x[target], reverse=True)
DataResults.sort(key=lambda x: x[target], reverse=True)
for dictionary in DataResults:
del dictionary[target]
global AllTargets
global target_names
global target_namesLoc
AllTargets = [o[target] for o in DataResultsRaw]
AllTargetsFloatValues = []
previous = None
Class = 0
for i, value in enumerate(AllTargets):
if (i == 0):
previous = value
target_names.append(value)
if (value == previous):
AllTargetsFloatValues.append(Class)
else:
Class = Class + 1
target_names.append(value)
AllTargetsFloatValues.append(Class)
previous = value
ArrayDataResults = | pd.DataFrame.from_dict(DataResults) | pandas.DataFrame.from_dict |
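# --- Illustrative sketch (not part of the original server code) ---
# The loop above maps the sorted string targets to consecutive integer class codes
# (0, 1, 2, ...) while collecting the class names. A minimal standalone version of the
# same idea on toy data; the function and variable names are assumptions.
def _example_encode_sorted_labels(sorted_targets):
    names, codes = [], []
    current, code = None, -1
    for value in sorted_targets:
        if value != current:   # a new class appears in the sorted sequence
            code += 1
            names.append(value)
            current = value
        codes.append(code)
    return names, codes
# _example_encode_sorted_labels(['no', 'no', 'yes']) -> (['no', 'yes'], [0, 0, 1])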
import pandas as pd
import numpy as np
import sklearn
import os
import sys
sys.path.append('../../code/scripts')
from dataset_chunking_fxns import add_stratified_kfold_splits
# Load data into pd dataframes and adjust feature names
data_dir = '../../data/adult'
file_train = os.path.join(data_dir, 'adult.data')
file_test = os.path.join(data_dir, 'adult.test')
train_df = pd.read_csv(file_train, header=None, na_values='?')
test_df = pd.read_csv(file_test, header=None, na_values='?',skiprows=[0])
features = ['age', 'workclass', 'final-weight', 'education', 'education-num', 'marital-status', 'occupation',
'relationship', 'race', 'sex', 'capital-gain', 'capital-loss', 'hours-per-week', 'native-country', 'label']
train_df.columns = features
test_df.columns = features
print("Original number of points in training:", len(train_df))
print("Original number of points in test:", len(test_df))
print()
#drop final-weight feature because it's a measure of population proportion represented by the profile
train_df = train_df.drop(['final-weight'], axis=1)
test_df = test_df.drop(['final-weight'], axis=1)
feat_list = list(train_df.keys())
feat_list.remove('label')
print('number of features before one-hot encoding:', len(feat_list))
# train data: one hot encode non-binary discontinuous features
print("One hot encoding the following non-binary, discontinuous features:")
one_hot_columns = ['workclass', 'education', 'marital-status', 'occupation', 'relationship', 'race', 'native-country']
for col in one_hot_columns:
print(col)
print()
one_hot_workclass = pd.get_dummies(train_df['workclass'])
for feature in one_hot_columns:
one_hot_encoding = pd.get_dummies(train_df[feature])
if ' ?' in one_hot_encoding.columns:
one_hot_encoding = one_hot_encoding.drop([' ?'], axis=1)
train_df = train_df.join(one_hot_encoding)
train_df = train_df.drop(one_hot_columns, axis=1)
# train data: change binary features to 0/1
binary_columns = ['sex', 'label']
for feature in binary_columns:
one_hot_encoding = pd.get_dummies(train_df[feature])
binary_encoding = one_hot_encoding.drop([one_hot_encoding.columns[0]], axis=1)
train_df = train_df.join(binary_encoding)
train_df = train_df.drop(binary_columns, axis=1)
print('New name of train labels column:', train_df.columns[len(train_df.columns)-1])
# test data: one hot encode non-binary discontinuous features
one_hot_workclass = | pd.get_dummies(test_df['workclass']) | pandas.get_dummies |
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
drnnLSTMtanhMakespan0=[799, 798, 799, 799, 805, 806, 799, 805, 805, 800, 798, 798]
drnnLSTMtanhMakespan1=[800, 798, 796, 800, 796, 794, 795, 798, 800, 798, 805, 798]
drnnLSTMtanhMakespan2=[796, 800, 798, 804, 800, 798, 798, 798, 800, 800, 802, 797]
drnnLSTMtanhMakespan3=[805, 800, 800, 803, 794, 802, 800, 798, 799, 804, 799, 806]
drnnLSTMtanhMakespan4=[796, 798, 795, 798, 796, 799, 800, 796, 796, 798, 806, 800]
drnnLSTMtanhMakespan5=[798, 798, 799, 800, 800, 808, 798, 798, 801, 796, 799, 798]
drnnLSTMtanhMakespan6=[800, 796, 805, 798, 798, 796, 799, 800, 803, 800, 798, 800]
drnnLSTMtanhMakespan7=[799, 805, 802, 805, 800, 799, 800, 799, 805, 800, 794, 796]
drnnLSTMtanhMakespan8=[799, 798, 800, 798, 798, 800, 800, 800, 804, 799, 800, 804]
drnnLSTMtanhMakespan9=[795, 800, 795, 796, 798, 796, 797, 800, 797, 798, 796, 795]
drnnLSTMtanhMakespan10=[804, 799, 805, 798, 798, 798, 805, 800, 796, 804, 796, 799]
drnnLSTMtanhMakespan11=[795, 803, 805, 798, 795, 801, 798, 798, 804, 803, 799, 804]
drnnLSTMtanhMakespan12=[798, 798, 799, 800, 798, 798, 799, 799, 801, 796, 799, 798]
drnnLSTMtanhMakespan13=[798, 798, 799, 797, 796, 796, 800, 797, 805, 800, 800, 794]
drnnLSTMtanhMakespan14=[800, 798, 798, 796, 800, 800, 798, 798, 802, 798, 802, 798]
drnnLSTMtanhMakespan15=[796, 796, 800, 801, 800, 800, 796, 794, 796, 800, 796, 798]
drnnLSTMtanhMakespan16=[798, 798, 795, 797, 795, 799, 800, 796, 795, 796, 800, 800]
drnnLSTMtanhMakespan17=[794, 795, 800, 798, 795, 796, 798, 796, 795, 794, 798, 796]
drnnLSTMtanhMakespan18=[797, 795, 794, 794, 800, 796, 796, 795, 798, 795, 798, 794]
drnnLSTMtanhMakespan19=[797, 795, 795, 796, 798, 799, 795, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan20=[796, 794, 798, 797, 798, 799, 795, 795, 797, 795, 795, 792]
drnnLSTMtanhMakespan21=[797, 795, 797, 793, 794, 794, 800, 794, 798, 795, 797, 795]
drnnLSTMtanhMakespan22=[794, 800, 798, 795, 795, 796, 796, 799, 795, 794, 795, 795]
drnnLSTMtanhMakespan23=[795, 795, 794, 795, 794, 794, 797, 799, 796, 794, 794, 795]
drnnLSTMtanhMakespan24=[798, 795, 795, 795, 792, 794, 795, 794, 794, 795, 795, 795]
drnnLSTMtanhMakespan25=[794, 792, 794, 795, 795, 794, 794, 794, 794, 795, 794, 793]
drnnLSTMtanhMakespan26=[794, 794, 795, 796, 798, 795, 794, 794, 794, 794, 795, 794]
drnnLSTMtanhMakespan27=[795, 794, 795, 795, 795, 794, 794, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan28=[795, 794, 794, 795, 794, 795, 795, 795, 795, 794, 795, 794]
drnnLSTMtanhMakespan29=[792, 794, 795, 794, 794, 795, 794, 793, 795, 794, 795, 792]
drnnLSTMtanhMakespan30=[795, 794, 795, 795, 794, 794, 794, 795, 794, 794, 794, 794]
drnnLSTMtanhMakespan31=[794, 794, 795, 794, 795, 793, 795, 795, 795, 792, 794, 794]
drnnLSTMtanhMakespan32=[795, 795, 794, 793, 795, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMtanhMakespan33=[793, 794, 795, 793, 792, 795, 794, 794, 794, 794, 794, 795]
drnnLSTMtanhMakespan34=[794, 795, 795, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drnnLSTMtanhMakespan35=[794, 794, 797, 793, 792, 794, 793, 794, 795, 794, 795, 792]
drnnLSTMtanhMakespan36=[794, 794, 793, 794, 795, 797, 795, 795, 794, 795, 793, 794]
drnnLSTMtanhMakespan37=[795, 793, 795, 794, 795, 798, 795, 794, 795, 793, 795, 794]
drnnLSTMtanhMakespan38=[794, 795, 793, 795, 794, 794, 794, 794, 794, 794, 797, 795]
drnnLSTMtanhMakespan39=[794, 794, 795, 794, 795, 795, 794, 795, 794, 795, 798, 797]
drnnLSTMtanhMakespan40=[795, 795, 794, 795, 794, 795, 795, 794, 794, 794, 795, 795]
drnnLSTMtanhMakespan41=[794, 795, 792, 794, 794, 798, 795, 794, 794, 794, 793, 795]
drnnLSTMtanhMakespan42=[793, 795, 794, 793, 794, 794, 792, 794, 795, 794, 794, 793]
drnnLSTMtanhMakespan43=[793, 792, 793, 794, 794, 795, 792, 794, 795, 794, 795, 794]
drnnLSTMtanhMakespan44=[793, 794, 795, 795, 794, 794, 795, 798, 794, 792, 795, 794]
drnnLSTMtanhMakespan45=[795, 794, 794, 794, 794, 792, 794, 795, 794, 796, 795, 794]
drnnLSTMtanhMakespan46=[794, 793, 793, 795, 795, 794, 794, 794, 794, 796, 794, 794]
drnnLSTMtanhMakespan47=[794, 794, 795, 794, 794, 795, 792, 795, 794, 795, 795, 794]
drnnLSTMtanhMakespan48=[794, 795, 794, 794, 794, 792, 794, 795, 796, 794, 794, 795]
drnnLSTMtanhMakespan49=[794, 794, 794, 794, 794, 794, 792, 794, 793, 794, 795, 794]
drnnLSTMtanhRewards0=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17725973169122497, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards1=[-0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765]
drnnLSTMtanhRewards2=[-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17653532907770195, -0.17562802996914942]
drnnLSTMtanhRewards3=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17671654929577466, -0.17508269018743108, -0.17653532907770195, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.1768976897689769, -0.1759911894273128, -0.17725973169122497]
drnnLSTMtanhRewards4=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.17617264919621228]
drnnLSTMtanhRewards5=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards6=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMtanhRewards7=[-0.1759911894273128, -0.177078750549934, -0.17653532907770195, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drnnLSTMtanhRewards8=[-0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769]
drnnLSTMtanhRewards9=[-0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drnnLSTMtanhRewards10=[-0.1768976897689769, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128]
drnnLSTMtanhRewards11=[-0.17526455026455026, -0.17671654929577466, -0.177078750549934, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.1759911894273128, -0.1768976897689769]
drnnLSTMtanhRewards12=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1763540290620872, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drnnLSTMtanhRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108]
drnnLSTMtanhRewards14=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765]
drnnLSTMtanhRewards15=[-0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnLSTMtanhRewards16=[-0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228]
drnnLSTMtanhRewards17=[-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drnnLSTMtanhRewards18=[-0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drnnLSTMtanhRewards19=[-0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards20=[-0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards21=[-0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards22=[-0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards23=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards24=[-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards25=[-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards26=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards27=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards28=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards29=[-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards31=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards32=[-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards33=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards34=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards35=[-0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224]
drnnLSTMtanhRewards36=[-0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108]
drnnLSTMtanhRewards37=[-0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards38=[-0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMtanhRewards39=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drnnLSTMtanhRewards40=[-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMtanhRewards41=[-0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drnnLSTMtanhRewards42=[-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drnnLSTMtanhRewards43=[-0.1749007498897221, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards44=[-0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards45=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards46=[-0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMtanhRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMtanhRewards48=[-0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drnnLSTMtanhRewards49=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
# Deep Recurrent Reinforcement Learning: 1 LSTM layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
drnnLSTMreluMakespan0=[805, 800, 800, 800, 794, 800, 798, 809, 795, 800, 798, 798]
drnnLSTMreluMakespan1=[798, 798, 796, 799, 800, 796, 796, 798, 798, 794, 798, 800]
drnnLSTMreluMakespan2=[805, 805, 798, 799, 806, 799, 806, 799, 800, 798, 805, 795]
drnnLSTMreluMakespan3=[800, 800, 800, 796, 800, 800, 799, 806, 808, 798, 797, 798]
drnnLSTMreluMakespan4=[805, 805, 795, 796, 799, 804, 798, 794, 798, 794, 796, 810]
drnnLSTMreluMakespan5=[798, 798, 798, 795, 800, 798, 796, 802, 800, 800, 805, 801]
drnnLSTMreluMakespan6=[800, 798, 798, 795, 800, 796, 800, 798, 799, 796, 805, 800]
drnnLSTMreluMakespan7=[800, 800, 800, 799, 798, 798, 800, 805, 800, 799, 800, 801]
drnnLSTMreluMakespan8=[799, 800, 800, 799, 795, 795, 805, 795, 798, 800, 798, 800]
drnnLSTMreluMakespan9=[800, 796, 805, 798, 798, 795, 805, 800, 799, 795, 800, 805]
drnnLSTMreluMakespan10=[805, 798, 805, 800, 801, 805, 799, 805, 798, 800, 800, 798]
drnnLSTMreluMakespan11=[798, 803, 800, 797, 795, 796, 794, 799, 800, 800, 800, 796]
drnnLSTMreluMakespan12=[799, 798, 799, 795, 798, 795, 798, 798, 798, 795, 798, 798]
drnnLSTMreluMakespan13=[798, 798, 799, 796, 798, 796, 800, 799, 796, 794, 796, 795]
drnnLSTMreluMakespan14=[796, 798, 806, 799, 804, 798, 805, 798, 800, 805, 794, 800]
drnnLSTMreluMakespan15=[806, 795, 800, 796, 798, 796, 810, 798, 799, 798, 800, 800]
drnnLSTMreluMakespan16=[799, 796, 798, 798, 798, 800, 798, 810, 796, 805, 800, 795]
drnnLSTMreluMakespan17=[798, 798, 798, 794, 798, 805, 801, 798, 800, 799, 798, 798]
drnnLSTMreluMakespan18=[795, 800, 794, 798, 797, 798, 794, 800, 797, 796, 794, 794]
drnnLSTMreluMakespan19=[798, 802, 794, 798, 799, 795, 797, 795, 800, 796, 797, 796]
drnnLSTMreluMakespan20=[794, 797, 795, 794, 799, 795, 795, 795, 800, 797, 794, 798]
drnnLSTMreluMakespan21=[799, 798, 796, 795, 794, 798, 795, 795, 798, 798, 795, 794]
drnnLSTMreluMakespan22=[794, 794, 795, 797, 795, 795, 795, 792, 794, 795, 794, 794]
drnnLSTMreluMakespan23=[794, 794, 794, 794, 795, 796, 793, 794, 795, 794, 797, 795]
drnnLSTMreluMakespan24=[794, 792, 792, 794, 796, 792, 794, 795, 794, 792, 796, 795]
drnnLSTMreluMakespan25=[794, 795, 795, 794, 794, 792, 795, 792, 795, 794, 794, 794]
drnnLSTMreluMakespan26=[795, 794, 794, 795, 794, 794, 793, 794, 797, 795, 794, 795]
drnnLSTMreluMakespan27=[794, 794, 795, 796, 795, 797, 794, 794, 795, 801, 794, 795]
drnnLSTMreluMakespan28=[795, 795, 795, 795, 794, 792, 794, 797, 794, 795, 795, 795]
drnnLSTMreluMakespan29=[794, 792, 798, 794, 797, 795, 793, 795, 795, 794, 795, 795]
drnnLSTMreluMakespan30=[795, 794, 798, 794, 794, 795, 792, 796, 794, 796, 794, 794]
drnnLSTMreluMakespan31=[794, 795, 795, 794, 795, 794, 795, 795, 794, 794, 795, 795]
drnnLSTMreluMakespan32=[798, 794, 794, 794, 798, 792, 795, 795, 795, 796, 794, 795]
drnnLSTMreluMakespan33=[794, 796, 794, 794, 794, 795, 794, 794, 797, 793, 793, 795]
drnnLSTMreluMakespan34=[794, 794, 795, 794, 794, 793, 794, 795, 793, 795, 795, 794]
drnnLSTMreluMakespan35=[798, 796, 795, 794, 795, 795, 795, 795, 794, 795, 797, 795]
drnnLSTMreluMakespan36=[794, 796, 794, 794, 794, 794, 795, 795, 797, 796, 795, 795]
drnnLSTMreluMakespan37=[795, 794, 796, 795, 795, 795, 795, 794, 792, 797, 794, 793]
drnnLSTMreluMakespan38=[794, 798, 794, 792, 794, 792, 795, 797, 793, 794, 794, 797]
drnnLSTMreluMakespan39=[792, 794, 794, 794, 792, 795, 795, 795, 794, 794, 795, 794]
drnnLSTMreluMakespan40=[792, 795, 795, 792, 795, 795, 794, 795, 794, 795, 794, 795]
drnnLSTMreluMakespan41=[794, 797, 795, 794, 795, 795, 798, 794, 795, 796, 796, 794]
drnnLSTMreluMakespan42=[794, 795, 795, 795, 794, 795, 795, 794, 794, 795, 793, 795]
drnnLSTMreluMakespan43=[795, 794, 795, 794, 795, 795, 792, 794, 794, 795, 794, 795]
drnnLSTMreluMakespan44=[795, 794, 792, 795, 794, 794, 795, 794, 796, 795, 796, 794]
drnnLSTMreluMakespan45=[795, 794, 793, 794, 793, 795, 794, 794, 795, 794, 795, 794]
drnnLSTMreluMakespan46=[794, 796, 793, 794, 794, 795, 799, 795, 794, 794, 794, 794]
drnnLSTMreluMakespan47=[794, 794, 794, 794, 795, 793, 795, 795, 794, 795, 795, 795]
drnnLSTMreluMakespan48=[794, 794, 795, 794, 795, 795, 795, 794, 794, 795, 795, 794]
drnnLSTMreluMakespan49=[795, 795, 795, 794, 795, 795, 794, 795, 793, 793, 792, 792]
drnnLSTMreluRewards0=[-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.1778021978021978, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards1=[-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards2=[-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17725973169122497, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17526455026455026]
drnnLSTMreluRewards3=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnLSTMreluRewards4=[-0.177078750549934, -0.177078750549934, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17798286090969018]
drnnLSTMreluRewards5=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17653532907770195, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.1763540290620872]
drnnLSTMreluRewards6=[-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.17617264919621228]
drnnLSTMreluRewards7=[-0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872]
drnnLSTMreluRewards8=[-0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228]
drnnLSTMreluRewards9=[-0.17617264919621228, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17617264919621228, -0.177078750549934]
drnnLSTMreluRewards10=[-0.177078750549934, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drnnLSTMreluRewards11=[-0.17580964970257765, -0.17671654929577466, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387]
drnnLSTMreluRewards12=[-0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards13=[-0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards14=[-0.17544633017412387, -0.17580964970257765, -0.17725973169122497, -0.1759911894273128, -0.1768976897689769, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228]
drnnLSTMreluRewards15=[-0.17725973169122497, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17798286090969018, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnLSTMreluRewards16=[-0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17544633017412387, -0.177078750549934, -0.17617264919621228, -0.17526455026455026]
drnnLSTMreluRewards17=[-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnLSTMreluRewards18=[-0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards19=[-0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387]
drnnLSTMreluRewards20=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765]
drnnLSTMreluRewards21=[-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards22=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards23=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards24=[-0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17544633017412387, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026]
drnnLSTMreluRewards25=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards26=[-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards27=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards28=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards29=[-0.17508269018743108, -0.17471872931833224, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards30=[-0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards31=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards32=[-0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards33=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards34=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards35=[-0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026]
drnnLSTMreluRewards36=[-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards37=[-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17562802996914942, -0.17508269018743108, -0.1749007498897221]
drnnLSTMreluRewards38=[-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
drnnLSTMreluRewards39=[-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards40=[-0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards41=[-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards42=[-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drnnLSTMreluRewards43=[-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drnnLSTMreluRewards44=[-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drnnLSTMreluRewards45=[-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards46=[-0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnLSTMreluRewards47=[-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drnnLSTMreluRewards48=[-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drnnLSTMreluRewards49=[-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.1749007498897221, -0.17471872931833224, -0.17471872931833224]
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, tanh activation function, 12 episodes, 50 iterations
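# A minimal sketch of what the network described above might look like in Keras.
# Layer widths, optimizer and state encoding are not recorded in this file, so the
# values below are illustrative assumptions only; reading "4 Dense layers" as three
# hidden layers plus the output layer is also an assumption.
def build_drnn_gru(n_timesteps, n_features, n_actions, activation="tanh"):
    """Hypothetical '1 GRU layer + 4 Dense layers' agent network (sketch)."""
    import tensorflow as tf  # local import so this optional sketch adds no hard dependency
    from tensorflow.keras import layers
    model = tf.keras.Sequential([
        layers.GRU(64, activation=activation, input_shape=(n_timesteps, n_features)),
        layers.Dense(64, activation=activation),
        layers.Dense(64, activation=activation),
        layers.Dense(32, activation=activation),
        layers.Dense(n_actions, activation="linear"),  # action scores / Q-values
    ])
    model.compile(optimizer="adam", loss="mse")
    return model

# The makespan and reward samples recorded for this configuration follow.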
drnnGRUtanhMakespan0 = [798, 799, 798, 804, 805, 799, 801, 801, 801, 799, 798, 796]
drnnGRUtanhMakespan1 = [800, 798, 798, 798, 798, 798, 801, 798, 795, 796, 800, 796]
drnnGRUtanhMakespan2 = [795, 804, 805, 800, 800, 796, 804, 800, 795, 798, 798, 801]
drnnGRUtanhMakespan3 = [806, 796, 794, 797, 798, 800, 800, 808, 805, 798, 800, 809]
drnnGRUtanhMakespan4 = [805, 801, 795, 798, 798, 800, 796, 796, 805, 798, 799, 798]
drnnGRUtanhMakespan5 = [804, 799, 798, 804, 796, 799, 798, 805, 796, 805, 798, 800]
drnnGRUtanhMakespan6 = [800, 799, 794, 801, 799, 796, 800, 804, 797, 796, 800, 798]
drnnGRUtanhMakespan7 = [798, 800, 810, 810, 805, 800, 795, 798, 800, 805, 799, 800]
drnnGRUtanhMakespan8 = [798, 797, 800, 800, 804, 805, 798, 798, 801, 795, 798, 809]
drnnGRUtanhMakespan9 = [803, 800, 800, 805, 805, 798, 804, 803, 805, 801, 810, 801]
drnnGRUtanhMakespan10 = [798, 799, 798, 798, 805, 804, 805, 798, 799, 798, 800, 800]
drnnGRUtanhMakespan11 = [796, 795, 805, 800, 800, 798, 795, 804, 805, 798, 800, 800]
drnnGRUtanhMakespan12 = [799, 799, 809, 800, 799, 799, 797, 805, 799, 800, 798, 795]
drnnGRUtanhMakespan13 = [805, 800, 800, 805, 800, 799, 798, 801, 798, 797, 805, 800]
drnnGRUtanhMakespan14 = [800, 798, 800, 800, 800, 804, 804, 799, 799, 800, 798, 798]
drnnGRUtanhMakespan15 = [805, 800, 795, 800, 804, 795, 800, 798, 799, 798, 800, 796]
drnnGRUtanhMakespan16 = [806, 795, 801, 799, 799, 796, 796, 794, 802, 796, 800, 802]
drnnGRUtanhMakespan17 = [796, 800, 798, 800, 794, 800, 804, 805, 798, 810, 800, 798]
drnnGRUtanhMakespan18 = [798, 800, 794, 794, 797, 798, 800, 805, 798, 798, 804, 798]
drnnGRUtanhMakespan19 = [796, 800, 806, 799, 796, 800, 798, 805, 798, 799, 797, 805]
drnnGRUtanhMakespan20 = [805, 800, 799, 796, 805, 805, 805, 794, 809, 796, 800, 797]
drnnGRUtanhMakespan21 = [798, 800, 800, 800, 798, 801, 796, 801, 801, 801, 795, 799]
drnnGRUtanhMakespan22 = [798, 801, 797, 800, 799, 795, 799, 799, 800, 801, 800, 799]
drnnGRUtanhMakespan23 = [800, 798, 799, 805, 794, 800, 798, 796, 796, 804, 800, 794]
drnnGRUtanhMakespan24 = [800, 800, 798, 805, 804, 799, 798, 801, 800, 798, 798, 798]
drnnGRUtanhMakespan25 = [798, 798, 798, 795, 800, 803, 798, 798, 800, 799, 796, 798]
drnnGRUtanhMakespan26 = [796, 798, 798, 798, 805, 796, 798, 798, 805, 795, 801, 796]
drnnGRUtanhMakespan27 = [794, 796, 796, 800, 800, 798, 800, 798, 802, 798, 797, 798]
drnnGRUtanhMakespan28 = [799, 799, 800, 800, 798, 802, 799, 798, 795, 795, 794, 798]
drnnGRUtanhMakespan29 = [798, 796, 796, 797, 796, 798, 800, 800, 796, 798, 800, 795]
drnnGRUtanhMakespan30 = [799, 798, 795, 795, 800, 795, 798, 798, 799, 798, 805, 799]
drnnGRUtanhMakespan31 = [795, 799, 794, 794, 796, 795, 795, 794, 798, 797, 798, 795]
drnnGRUtanhMakespan32 = [797, 798, 795, 796, 798, 795, 797, 798, 795, 794, 795, 796]
drnnGRUtanhMakespan33 = [799, 795, 794, 794, 798, 795, 798, 797, 800, 796, 795, 794]
drnnGRUtanhMakespan34 = [798, 795, 798, 796, 798, 794, 796, 798, 798, 798, 796, 797]
drnnGRUtanhMakespan35 = [795, 798, 796, 798, 794, 801, 795, 800, 795, 800, 794, 800]
drnnGRUtanhMakespan36 = [798, 799, 796, 797, 795, 794, 800, 795, 795, 794, 795, 795]
drnnGRUtanhMakespan37 = [799, 798, 795, 795, 794, 795, 795, 796, 805, 795, 798, 796]
drnnGRUtanhMakespan38 = [798, 794, 795, 795, 795, 796, 795, 796, 800, 798, 797, 796]
drnnGRUtanhMakespan39 = [794, 795, 795, 797, 795, 795, 794, 794, 798, 795, 794, 798]
drnnGRUtanhMakespan40 = [795, 795, 795, 795, 795, 795, 794, 794, 793, 797, 794, 795]
drnnGRUtanhMakespan41 = [794, 794, 795, 793, 795, 795, 792, 794, 795, 794, 794, 794]
drnnGRUtanhMakespan42 = [795, 795, 795, 796, 794, 797, 795, 795, 792, 795, 796, 793]
drnnGRUtanhMakespan43 = [794, 795, 795, 794, 795, 794, 798, 794, 797, 795, 794, 794]
drnnGRUtanhMakespan44 = [795, 795, 793, 794, 795, 794, 795, 795, 794, 794, 795, 794]
drnnGRUtanhMakespan45 = [794, 794, 794, 794, 794, 794, 795, 794, 794, 794, 796, 795]
drnnGRUtanhMakespan46 = [795, 794, 795, 794, 794, 794, 793, 794, 795, 795, 794, 797]
drnnGRUtanhMakespan47 = [794, 794, 794, 794, 795, 794, 795, 792, 794, 795, 794, 794]
drnnGRUtanhMakespan48 = [795, 794, 794, 794, 795, 798, 794, 794, 794, 795, 794, 794]
drnnGRUtanhMakespan49 = [795, 795, 794, 795, 793, 795, 796, 794, 795, 794, 794, 797]
drnnGRUtanhRewards0 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards1 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards2 = [-0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872]
drnnGRUtanhRewards3 = [-0.17725973169122497, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1776214552648934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1778021978021978]
drnnGRUtanhRewards4 = [-0.177078750549934, -0.1763540290620872, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUtanhRewards5 = [-0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.1768976897689769, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUtanhRewards6 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.1763540290620872, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards7 = [-0.17580964970257765, -0.17617264919621228, -0.17798286090969018, -0.177078750549934, -0.17798286090969018, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUtanhRewards8 = [-0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17580964970257765, -0.1778021978021978]
drnnGRUtanhRewards9 = [-0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.177078750549934, -0.1763540290620872, -0.17798286090969018, -0.1763540290620872]
drnnGRUtanhRewards10 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards11 = [-0.17544633017412387, -0.17526455026455026, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228]
drnnGRUtanhRewards12 = [-0.1759911894273128, -0.1759911894273128, -0.1778021978021978, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17562802996914942, -0.177078750549934, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards13 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17617264919621228]
drnnGRUtanhRewards14 = [-0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1768976897689769, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards15 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.1768976897689769, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387]
drnnGRUtanhRewards16 = [-0.17725973169122497, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17508269018743108, -0.17653532907770195, -0.17544633017412387, -0.17617264919621228, -0.17653532907770195]
drnnGRUtanhRewards17 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.1768976897689769, -0.177078750549934, -0.17580964970257765, -0.17798286090969018, -0.17617264919621228, -0.17580964970257765]
drnnGRUtanhRewards18 = [-0.17580964970257765, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1768976897689769, -0.17580964970257765]
drnnGRUtanhRewards19 = [-0.17544633017412387, -0.17617264919621228, -0.17725973169122497, -0.1759911894273128, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17562802996914942, -0.1759911894273128, -0.177078750549934]
drnnGRUtanhRewards20 = [-0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.177078750549934, -0.177078750549934, -0.177078750549934, -0.17508269018743108, -0.1778021978021978, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942]
drnnGRUtanhRewards21 = [-0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17544633017412387, -0.1763540290620872, -0.1763540290620872, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUtanhRewards22 = [-0.17580964970257765, -0.1763540290620872, -0.17562802996914942, -0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1763540290620872, -0.17617264919621228, -0.1759911894273128]
drnnGRUtanhRewards23 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108]
drnnGRUtanhRewards24 = [-0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.177078750549934, -0.1768976897689769, -0.17580964970257765, -0.1763540290620872, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765]
drnnGRUtanhRewards25 = [-0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUtanhRewards26 = [-0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387]
drnnGRUtanhRewards27 = [-0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765]
drnnGRUtanhRewards28 = [-0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drnnGRUtanhRewards29 = [-0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026]
drnnGRUtanhRewards30 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.1759911894273128]
drnnGRUtanhRewards31 = [-0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026]
drnnGRUtanhRewards32 = [-0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards33 = [-0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards34 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942]
drnnGRUtanhRewards35 = [-0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drnnGRUtanhRewards36 = [-0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drnnGRUtanhRewards37 = [-0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.177078750549934, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUtanhRewards38 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387]
drnnGRUtanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765]
drnnGRUtanhRewards40 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026]
drnnGRUtanhRewards41 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards42 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.1749007498897221]
drnnGRUtanhRewards43 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards44 = [-0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drnnGRUtanhRewards45 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drnnGRUtanhRewards46 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942]
drnnGRUtanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drnnGRUtanhRewards49 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942]
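# Observed relationship between the makespan and reward lists in this file: each
# reward value matches -makespan / (makespan + 3741), e.g. 800 -> -800/4541 =
# -0.17617264919621228. The offset 3741 is inferred from these pairs, not stated
# in the source, so treat it as an assumption.
def makespan_to_reward(makespan, offset=3741):
    """Recompute the normalized negative reward from a makespan value."""
    return -makespan / (makespan + offset)

# Sanity check against one of the recorded runs:
assert all(
    abs(makespan_to_reward(m) - r) < 1e-12
    for m, r in zip(drnnGRUtanhMakespan0, drnnGRUtanhRewards0)
)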
# Deep Recurrent Reinforcement Learning: 1 GRU layer and 4 Dense layers, relu activation function, 12 episodes, 50 iterations
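# The relu runs below presumably reuse the topology sketched above for the tanh
# case, changing only the activation argument (an assumption; the source records
# no further hyperparameters). For example:
#   model_relu = build_drnn_gru(n_timesteps, n_features, n_actions, activation="relu")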
drnnGRUreluMakespan0 = [800, 799, 798, 797, 798, 800, 800, 796, 800, 794, 800, 800]
drnnGRUreluMakespan1 = [798, 800, 805, 795, 799, 808, 795, 800, 796, 798, 799, 798]
drnnGRUreluMakespan2 = [799, 800, 806, 800, 800, 805, 805, 798, 799, 807, 800, 800]
drnnGRUreluMakespan3 = [798, 795, 799, 800, 800, 796, 798, 800, 800, 804, 805, 800]
drnnGRUreluMakespan4 = [811, 800, 799, 800, 805, 798, 798, 799, 796, 804, 805, 804]
drnnGRUreluMakespan5 = [799, 795, 797, 800, 798, 800, 800, 798, 800, 797, 800, 798]
drnnGRUreluMakespan6 = [798, 800, 798, 799, 797, 798, 800, 796, 801, 799, 795, 798]
drnnGRUreluMakespan7 = [800, 804, 795, 801, 796, 806, 805, 798, 800, 799, 799, 804]
drnnGRUreluMakespan8 = [800, 799, 799, 800, 805, 796, 800, 800, 810, 796, 800, 798]
drnnGRUreluMakespan9 = [794, 800, 799, 805, 800, 800, 798, 798, 796, 795, 798, 796]
drnnGRUreluMakespan10 = [798, 800, 798, 801, 795, 802, 796, 809, 800, 800, 798, 795]
drnnGRUreluMakespan11 = [804, 800, 799, 799, 798, 803, 798, 798, 805, 803, 800, 796]
drnnGRUreluMakespan12 = [800, 799, 805, 797, 798, 796, 799, 794, 799, 805, 799, 800]
drnnGRUreluMakespan13 = [796, 800, 798, 800, 795, 799, 800, 804, 800, 794, 805, 805]
drnnGRUreluMakespan14 = [800, 795, 796, 798, 798, 801, 805, 794, 800, 801, 801, 796]
drnnGRUreluMakespan15 = [798, 800, 796, 796, 798, 794, 797, 800, 796, 801, 795, 799]
drnnGRUreluMakespan16 = [800, 805, 794, 800, 799, 800, 805, 801, 798, 800, 801, 799]
drnnGRUreluMakespan17 = [797, 803, 801, 808, 794, 799, 799, 800, 805, 796, 801, 796]
drnnGRUreluMakespan18 = [805, 800, 800, 804, 799, 798, 800, 799, 804, 796, 800, 804]
drnnGRUreluMakespan19 = [804, 798, 800, 799, 799, 799, 805, 795, 801, 799, 799, 805]
drnnGRUreluMakespan20 = [799, 804, 796, 798, 796, 798, 800, 805, 799, 810, 800, 800]
drnnGRUreluMakespan21 = [798, 799, 799, 805, 798, 798, 805, 798, 794, 799, 798, 798]
drnnGRUreluMakespan22 = [799, 798, 798, 796, 798, 805, 799, 798, 798, 799, 796, 798]
drnnGRUreluMakespan23 = [798, 805, 808, 798, 798, 805, 810, 796, 804, 799, 800, 799]
drnnGRUreluMakespan24 = [798, 796, 798, 795, 800, 798, 799, 798, 797, 805, 798, 800]
drnnGRUreluMakespan25 = [799, 796, 799, 798, 805, 798, 798, 800, 796, 794, 810, 798]
drnnGRUreluMakespan26 = [799, 798, 805, 800, 802, 798, 799, 799, 799, 794, 802, 797]
drnnGRUreluMakespan27 = [798, 800, 805, 796, 798, 795, 802, 796, 798, 800, 798, 794]
drnnGRUreluMakespan28 = [796, 805, 798, 800, 800, 798, 810, 798, 798, 798, 796, 796]
drnnGRUreluMakespan29 = [800, 798, 798, 802, 794, 798, 796, 808, 800, 800, 798, 799]
drnnGRUreluMakespan30 = [798, 796, 798, 798, 794, 798, 794, 800, 796, 794, 800, 800]
drnnGRUreluMakespan31 = [794, 802, 797, 799, 798, 800, 799, 799, 796, 796, 798, 798]
drnnGRUreluMakespan32 = [799, 798, 794, 795, 798, 805, 804, 797, 795, 800, 796, 798]
drnnGRUreluMakespan33 = [803, 799, 805, 796, 794, 798, 797, 798, 798, 794, 794, 798]
drnnGRUreluMakespan34 = [810, 796, 795, 798, 799, 798, 796, 795, 795, 797, 798, 798]
drnnGRUreluMakespan35 = [799, 799, 799, 799, 795, 798, 795, 800, 796, 795, 795, 796]
drnnGRUreluMakespan36 = [795, 797, 798, 799, 799, 799, 800, 794, 796, 795, 798, 800]
drnnGRUreluMakespan37 = [800, 798, 799, 794, 800, 796, 798, 798, 797, 800, 794, 798]
drnnGRUreluMakespan38 = [800, 799, 794, 796, 795, 800, 796, 804, 800, 795, 800, 798]
drnnGRUreluMakespan39 = [794, 798, 795, 804, 805, 799, 798, 800, 796, 798, 795, 794]
drnnGRUreluMakespan40 = [799, 798, 796, 798, 798, 799, 800, 796, 798, 798, 799, 798]
drnnGRUreluMakespan41 = [796, 798, 800, 797, 799, 796, 797, 796, 799, 804, 805, 798]
drnnGRUreluMakespan42 = [798, 794, 795, 799, 799, 798, 797, 798, 798, 798, 798, 795]
drnnGRUreluMakespan43 = [799, 798, 794, 794, 795, 794, 795, 799, 799, 800, 799, 794]
drnnGRUreluMakespan44 = [795, 796, 795, 799, 794, 795, 794, 796, 795, 794, 795, 796]
drnnGRUreluMakespan45 = [794, 797, 794, 795, 796, 795, 794, 799, 795, 794, 798, 798]
drnnGRUreluMakespan46 = [795, 795, 794, 795, 794, 794, 792, 794, 795, 797, 794, 794]
drnnGRUreluMakespan47 = [798, 796, 797, 798, 794, 798, 794, 797, 794, 803, 798, 798]
drnnGRUreluMakespan48 = [795, 794, 796, 798, 795, 794, 796, 795, 796, 794, 796, 796]
drnnGRUreluMakespan49 = [798, 798, 796, 798, 798, 796, 796, 798, 798, 798, 796, 798]
drnnGRUreluRewards0 = [-0.17617264919621228, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards1 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17526455026455026, -0.1759911894273128, -0.1776214552648934, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards2 = [-0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228, -0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.1759911894273128, -0.1774406332453826, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards3 = [-0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.177078750549934, -0.17617264919621228]
drnnGRUreluRewards4 = [-0.1781634446397188, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.1768976897689769, -0.177078750549934, -0.1768976897689769]
drnnGRUreluRewards5 = [-0.1759911894273128, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards6 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765]
drnnGRUreluRewards7 = [-0.17617264919621228, -0.1768976897689769, -0.17526455026455026, -0.1763540290620872, -0.17544633017412387, -0.17725973169122497, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1768976897689769]
drnnGRUreluRewards8 = [-0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17617264919621228, -0.17617264919621228, -0.17798286090969018, -0.17544633017412387, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards9 = [-0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drnnGRUreluRewards10 = [-0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.1778021978021978, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards11 = [-0.1768976897689769, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17671654929577466, -0.17617264919621228, -0.17544633017412387]
drnnGRUreluRewards12 = [-0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17562802996914942, -0.17580964970257765, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.1759911894273128, -0.177078750549934, -0.1759911894273128, -0.17617264919621228]
drnnGRUreluRewards13 = [-0.17544633017412387, -0.17617264919621228, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.1768976897689769, -0.17617264919621228, -0.17508269018743108, -0.177078750549934, -0.177078750549934]
drnnGRUreluRewards14 = [-0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1763540290620872, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards15 = [-0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.1763540290620872, -0.17526455026455026, -0.1759911894273128]
drnnGRUreluRewards16 = [-0.17617264919621228, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.177078750549934, -0.1763540290620872, -0.17580964970257765, -0.17617264919621228, -0.1763540290620872, -0.1759911894273128]
drnnGRUreluRewards17 = [-0.17562802996914942, -0.17671654929577466, -0.1763540290620872, -0.1776214552648934, -0.17508269018743108, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.1763540290620872, -0.17544633017412387]
drnnGRUreluRewards18 = [-0.177078750549934, -0.17617264919621228, -0.17617264919621228, -0.1768976897689769, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17617264919621228, -0.1768976897689769]
drnnGRUreluRewards19 = [-0.1768976897689769, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128, -0.1759911894273128, -0.177078750549934]
drnnGRUreluRewards20 = [-0.1759911894273128, -0.1768976897689769, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards21 = [-0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards22 = [-0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards23 = [-0.17580964970257765, -0.177078750549934, -0.1776214552648934, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17798286090969018, -0.17544633017412387, -0.1768976897689769, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128]
drnnGRUreluRewards24 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.177078750549934, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards25 = [-0.1759911894273128, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17798286090969018, -0.17580964970257765]
drnnGRUreluRewards26 = [-0.1759911894273128, -0.17580964970257765, -0.177078750549934, -0.17617264919621228, -0.17653532907770195, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17508269018743108, -0.17653532907770195, -0.17562802996914942]
drnnGRUreluRewards27 = [-0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17653532907770195, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.17508269018743108]
drnnGRUreluRewards28 = [-0.17544633017412387, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.17798286090969018, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards29 = [-0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.17653532907770195, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.1776214552648934, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128]
drnnGRUreluRewards30 = [-0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228]
drnnGRUreluRewards31 = [-0.17508269018743108, -0.17653532907770195, -0.17562802996914942, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards32 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.1768976897689769, -0.177078750549934, -0.17562802996914942, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drnnGRUreluRewards33 = [-0.17671654929577466, -0.1759911894273128, -0.177078750549934, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards34 = [-0.17798286090969018, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards35 = [-0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards36 = [-0.17526455026455026, -0.17562802996914942, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228]
drnnGRUreluRewards37 = [-0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17580964970257765]
drnnGRUreluRewards38 = [-0.17617264919621228, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.1768976897689769, -0.17617264919621228, -0.17526455026455026, -0.17617264919621228, -0.17580964970257765]
drnnGRUreluRewards39 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.177078750549934, -0.1759911894273128, -0.17580964970257765, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108]
drnnGRUreluRewards40 = [-0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drnnGRUreluRewards41 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.1768976897689769, -0.177078750549934, -0.17580964970257765]
drnnGRUreluRewards42 = [-0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026]
drnnGRUreluRewards43 = [-0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.1759911894273128, -0.17617264919621228, -0.1759911894273128, -0.17508269018743108]
drnnGRUreluRewards44 = [-0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drnnGRUreluRewards45 = [-0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards46 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108]
drnnGRUreluRewards47 = [-0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17671654929577466, -0.17580964970257765, -0.17580964970257765]
drnnGRUreluRewards48 = [-0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drnnGRUreluRewards49 = [-0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765]
# Deep Reinforcement Learning: 5 Dense layers, tanh activation function, 12 episodes, 50 iterations
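# A minimal sketch of the feed-forward (non-recurrent) agent described above,
# assuming "5 Dense layers" means four hidden layers plus the output layer; layer
# widths, optimizer and input size are illustrative assumptions, not values from
# the source.
def build_drl_dense(n_features, n_actions, activation="tanh"):
    """Hypothetical 5-Dense-layer agent network (sketch)."""
    import tensorflow as tf  # local import so this optional sketch adds no hard dependency
    from tensorflow.keras import layers
    model = tf.keras.Sequential([
        layers.Dense(64, activation=activation, input_shape=(n_features,)),
        layers.Dense(64, activation=activation),
        layers.Dense(64, activation=activation),
        layers.Dense(32, activation=activation),
        layers.Dense(n_actions, activation="linear"),  # action scores / Q-values
    ])
    model.compile(optimizer="adam", loss="mse")
    return model

# The makespan and reward samples recorded for this configuration follow.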
drlTanhMakespan0 = [794, 794, 805, 799, 810, 800, 794, 810, 804, 806, 812, 808]
drlTanhMakespan1 = [796, 795, 795, 798, 799, 800, 800, 795, 797, 796, 797, 799]
drlTanhMakespan2 = [800, 797, 798, 801, 799, 800, 796, 795, 797, 796, 794, 798]
drlTanhMakespan3 = [800, 795, 799, 796, 799, 798, 795, 799, 795, 799, 798, 796]
drlTanhMakespan4 = [809, 795, 795, 800, 797, 795, 798, 798, 799, 799, 798, 798]
drlTanhMakespan5 = [795, 795, 795, 799, 795, 798, 795, 800, 795, 796, 795, 805]
drlTanhMakespan6 = [794, 800, 795, 793, 798, 795, 794, 798, 795, 799, 795, 796]
drlTanhMakespan7 = [795, 795, 795, 795, 798, 795, 797, 797, 795, 795, 798, 797]
drlTanhMakespan8 = [795, 795, 795, 794, 800, 800, 794, 795, 794, 794, 797, 795]
drlTanhMakespan9 = [793, 794, 796, 795, 796, 800, 794, 797, 793, 795, 798, 795]
drlTanhMakespan10 = [795, 795, 797, 794, 795, 798, 797, 795, 798, 794, 794, 794]
drlTanhMakespan11 = [795, 795, 795, 795, 797, 795, 795, 794, 795, 795, 795, 794]
drlTanhMakespan12 = [794, 798, 795, 794, 795, 795, 795, 797, 799, 795, 795, 795]
drlTanhMakespan13 = [795, 797, 795, 800, 796, 795, 796, 795, 795, 795, 798, 794]
drlTanhMakespan14 = [795, 795, 796, 794, 794, 794, 797, 795, 798, 795, 795, 793]
drlTanhMakespan15 = [799, 794, 795, 795, 795, 796, 801, 797, 795, 794, 795, 799]
drlTanhMakespan16 = [795, 795, 796, 798, 795, 795, 795, 795, 795, 798, 798, 796]
drlTanhMakespan17 = [800, 798, 795, 795, 798, 794, 795, 795, 797, 795, 796, 794]
drlTanhMakespan18 = [797, 800, 798, 797, 796, 794, 799, 797, 795, 796, 799, 798]
drlTanhMakespan19 = [797, 800, 795, 794, 794, 796, 795, 798, 796, 798, 797, 795]
drlTanhMakespan20 = [794, 795, 795, 799, 798, 797, 795, 795, 798, 795, 798, 795]
drlTanhMakespan21 = [796, 795, 795, 795, 795, 797, 798, 794, 797, 795, 796, 794]
drlTanhMakespan22 = [799, 796, 795, 795, 795, 795, 796, 795, 796, 798, 796, 795]
drlTanhMakespan23 = [799, 799, 795, 796, 796, 799, 796, 797, 794, 794, 798, 796]
drlTanhMakespan24 = [795, 795, 797, 800, 797, 795, 795, 796, 795, 795, 798, 799]
drlTanhMakespan25 = [795, 797, 795, 795, 795, 795, 800, 796, 795, 797, 795, 795]
drlTanhMakespan26 = [795, 795, 799, 794, 797, 794, 794, 798, 794, 796, 795, 798]
drlTanhMakespan27 = [796, 796, 795, 796, 798, 797, 794, 795, 794, 794, 794, 798]
drlTanhMakespan28 = [795, 795, 794, 798, 796, 796, 800, 797, 797, 796, 795, 794]
drlTanhMakespan29 = [795, 795, 798, 800, 797, 794, 796, 794, 792, 794, 794, 795]
drlTanhMakespan30 = [798, 797, 795, 799, 797, 800, 798, 799, 797, 800, 794, 796]
drlTanhMakespan31 = [794, 795, 800, 798, 800, 794, 800, 798, 799, 798, 798, 798]
drlTanhMakespan32 = [795, 795, 795, 794, 794, 794, 793, 795, 794, 793, 794, 795]
drlTanhMakespan33 = [794, 797, 792, 794, 795, 795, 797, 795, 795, 794, 792, 795]
drlTanhMakespan34 = [795, 794, 795, 798, 795, 796, 794, 795, 794, 794, 795, 794]
drlTanhMakespan35 = [796, 794, 797, 793, 794, 798, 795, 794, 793, 793, 795, 794]
drlTanhMakespan36 = [795, 795, 794, 795, 795, 795, 794, 795, 795, 793, 795, 794]
drlTanhMakespan37 = [794, 794, 798, 794, 794, 796, 795, 794, 793, 795, 795, 792]
drlTanhMakespan38 = [794, 796, 795, 794, 798, 798, 795, 795, 794, 794, 795, 794]
drlTanhMakespan39 = [794, 795, 795, 796, 792, 794, 795, 794, 795, 794, 794, 795]
drlTanhMakespan40 = [798, 795, 794, 795, 794, 794, 793, 795, 794, 794, 797, 794]
drlTanhMakespan41 = [795, 792, 795, 794, 794, 795, 794, 795, 792, 797, 795, 795]
drlTanhMakespan42 = [792, 794, 794, 795, 794, 794, 795, 794, 792, 794, 794, 794]
drlTanhMakespan43 = [794, 796, 794, 793, 795, 795, 793, 798, 794, 794, 798, 794]
drlTanhMakespan44 = [794, 794, 794, 794, 795, 794, 793, 794, 794, 795, 795, 794]
drlTanhMakespan45 = [790, 794, 793, 794, 793, 794, 795, 794, 791, 795, 795, 794]
drlTanhMakespan46 = [792, 794, 794, 794, 794, 794, 794, 793, 794, 794, 794, 794]
drlTanhMakespan47 = [794, 794, 794, 794, 794, 794, 794, 794, 792, 795, 793, 795]
drlTanhMakespan48 = [794, 794, 792, 792, 797, 794, 792, 794, 794, 795, 794, 795]
drlTanhMakespan49 = [795, 794, 794, 796, 794, 797, 794, 794, 794, 794, 794, 794]
drlTanhMakespan50 = [794, 792, 795, 794, 794, 794, 794, 794, 795, 794, 795, 794]
drlTanhMakespan51 = [794, 792, 796, 795, 794, 794, 795, 794, 795, 795, 795, 794]
drlTanhMakespan52 = [794, 794, 795, 792, 795, 795, 795, 792, 794, 793, 795, 794]
drlTanhMakespan53 = [794, 792, 794, 792, 794, 794, 794, 795, 795, 794, 794, 792]
drlTanhMakespan54 = [795, 793, 794, 794, 794, 792, 795, 794, 794, 792, 794, 796]
drlTanhMakespan55 = [795, 794, 794, 795, 795, 793, 794, 795, 794, 797, 795, 792]
drlTanhMakespan56 = [795, 795, 792, 795, 794, 795, 794, 794, 794, 795, 795, 795]
drlTanhMakespan57 = [795, 792, 795, 794, 795, 795, 792, 795, 794, 797, 792, 792]
drlTanhMakespan58 = [795, 795, 794, 795, 792, 794, 794, 794, 792, 792, 792, 793]
drlTanhMakespan59 = [795, 794, 792, 794, 794, 794, 792, 794, 794, 794, 793, 795]
drlTanhMakespan60 = [794, 795, 795, 795, 798, 794, 794, 794, 794, 794, 794, 792]
drlTanhMakespan61 = [792, 795, 794, 794, 795, 794, 792, 795, 795, 794, 794, 795]
drlTanhMakespan62 = [795, 794, 794, 794, 799, 794, 792, 794, 795, 795, 794, 793]
drlTanhMakespan63 = [791, 795, 792, 796, 794, 794, 792, 795, 793, 794, 792, 794]
drlTanhRewards0 = [-0.17508269018743108, -0.17508269018743108, -0.177078750549934, -0.1759911894273128, -0.17798286090969018, -0.17617264919621228, -0.17508269018743108, -0.17798286090969018, -0.1768976897689769, -0.17725973169122497, -0.17834394904458598, -0.1776214552648934]
drlTanhRewards1 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.1759911894273128, -0.17617264919621228, -0.17526455026455026, -0.17562802996914942, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlTanhRewards2 = [-0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17580964970257765]
drlTanhRewards3 = [-0.17617264919621228, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards4 = [-0.1778021978021978, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards5 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17617264919621228, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.177078750549934]
drlTanhRewards6 = [-0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17544633017412387]
drlTanhRewards7 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942]
drlTanhRewards8 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards9 = [-0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17617264919621228, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards10 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards11 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards12 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards13 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards14 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1749007498897221]
drlTanhRewards15 = [-0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1763540290620872, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128]
drlTanhRewards16 = [-0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards17 = [-0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards18 = [-0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.17562802996914942, -0.17544633017412387, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765]
drlTanhRewards19 = [-0.17562802996914942, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026]
drlTanhRewards20 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026]
drlTanhRewards21 = [-0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108]
drlTanhRewards22 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026]
drlTanhRewards23 = [-0.1759911894273128, -0.1759911894273128, -0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387]
drlTanhRewards24 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.1759911894273128]
drlTanhRewards25 = [-0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17544633017412387, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards26 = [-0.17526455026455026, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765]
drlTanhRewards27 = [-0.17544633017412387, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards28 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17562802996914942, -0.17544633017412387, -0.17617264919621228, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards29 = [-0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17562802996914942, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards30 = [-0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387]
drlTanhRewards31 = [-0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765]
drlTanhRewards32 = [-0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards33 = [-0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026]
drlTanhRewards34 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards35 = [-0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.1749007498897221, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards36 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards37 = [-0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards38 = [-0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards39 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108]
drlTanhRewards41 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards42 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards43 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlTanhRewards44 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards45 = [-0.1749007498897221, -0.17435444714191128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17453662842012357, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards46 = [-0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards47 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards49 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards50 = [-0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108]
drlTanhRewards51 = [-0.17508269018743108, -0.17471872931833224, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards52 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026, -0.17508269018743108]
drlTanhRewards53 = [-0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards54 = [-0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17544633017412387]
drlTanhRewards55 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17471872931833224]
drlTanhRewards56 = [-0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026]
drlTanhRewards57 = [-0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17471872931833224, -0.17471872931833224]
drlTanhRewards58 = [-0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17471872931833224, -0.17471872931833224, -0.1749007498897221]
drlTanhRewards59 = [-0.17526455026455026, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17526455026455026]
drlTanhRewards60 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224]
drlTanhRewards61 = [-0.17471872931833224, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17471872931833224, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026]
drlTanhRewards62 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221]
drlTanhRewards63 = [-0.17453662842012357, -0.17471872931833224, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17526455026455026, -0.1749007498897221, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108]
# Deep Reinforcement Learning: 5 Dense layers, ReLU activation function, 12 episodes, 50 iterations
drlReluMakespan0 = [796, 798, 809, 798, 796, 800, 798, 799, 800, 794, 800, 798]
drlReluMakespan1 = [800, 800, 801, 806, 804, 806, 808, 798, 796, 796, 798, 800]
drlReluMakespan2 = [805, 805, 798, 800, 800, 798, 801, 799, 800, 806, 800, 800]
drlReluMakespan3 = [798, 799, 798, 795, 798, 808, 803, 800, 798, 795, 799, 800]
drlReluMakespan4 = [805, 805, 799, 796, 798, 803, 799, 800, 800, 800, 795, 794]
drlReluMakespan5 = [799, 796, 795, 800, 801, 796, 800, 795, 803, 800, 800, 805]
drlReluMakespan6 = [799, 795, 798, 794, 805, 796, 795, 799, 798, 795, 804, 796]
drlReluMakespan7 = [795, 798, 799, 798, 798, 799, 795, 794, 796, 794, 795, 805]
drlReluMakespan8 = [805, 794, 794, 795, 798, 795, 798, 795, 799, 800, 796, 798]
drlReluMakespan9 = [797, 797, 797, 794, 795, 794, 794, 797, 796, 795, 801, 799]
drlReluMakespan10 = [799, 794, 797, 795, 794, 794, 795, 795, 795, 796, 797, 799]
drlReluMakespan11 = [796, 798, 800, 795, 805, 794, 798, 796, 795, 794, 798, 795]
drlReluMakespan12 = [800, 795, 794, 798, 800, 805, 800, 798, 804, 799, 794, 803]
drlReluMakespan13 = [796, 799, 798, 794, 800, 794, 795, 796, 798, 795, 794, 799]
drlReluMakespan14 = [795, 798, 798, 798, 805, 798, 798, 798, 795, 794, 800, 796]
drlReluMakespan15 = [795, 798, 795, 805, 798, 794, 795, 798, 796, 794, 795, 796]
drlReluMakespan16 = [798, 795, 796, 799, 796, 798, 798, 795, 795, 795, 795, 799]
drlReluMakespan17 = [794, 798, 796, 798, 795, 801, 794, 798, 797, 795, 796, 801]
drlReluMakespan18 = [798, 795, 798, 798, 801, 798, 795, 795, 797, 800, 794, 800]
drlReluMakespan19 = [795, 798, 794, 800, 796, 795, 798, 797, 795, 794, 796, 796]
drlReluMakespan20 = [794, 794, 795, 795, 795, 795, 796, 798, 799, 799, 799, 795]
drlReluMakespan21 = [802, 796, 794, 797, 797, 800, 794, 794, 804, 803, 798, 797]
drlReluMakespan22 = [794, 795, 795, 795, 798, 795, 794, 799, 794, 803, 795, 794]
drlReluMakespan23 = [794, 798, 799, 794, 795, 795, 799, 795, 796, 795, 797, 799]
drlReluMakespan24 = [795, 794, 797, 800, 794, 795, 795, 795, 795, 800, 800, 798]
drlReluMakespan25 = [795, 794, 797, 796, 798, 795, 795, 794, 799, 795, 794, 798]
drlReluMakespan26 = [801, 795, 800, 794, 794, 796, 800, 798, 798, 799, 794, 796]
drlReluMakespan27 = [796, 795, 796, 795, 796, 795, 795, 800, 794, 794, 794, 796]
drlReluMakespan28 = [794, 794, 795, 796, 794, 795, 795, 797, 794, 794, 796, 795]
drlReluMakespan29 = [793, 794, 795, 800, 795, 795, 794, 798, 798, 796, 795, 794]
drlReluMakespan30 = [802, 794, 794, 798, 794, 796, 805, 794, 800, 794, 796, 794]
drlReluMakespan31 = [797, 794, 794, 794, 800, 800, 794, 794, 798, 795, 794, 798]
drlReluMakespan32 = [794, 798, 794, 795, 794, 795, 798, 794, 794, 795, 794, 798]
drlReluMakespan33 = [798, 794, 798, 795, 794, 793, 797, 798, 794, 794, 801, 793]
drlReluMakespan34 = [794, 798, 794, 795, 794, 793, 798, 795, 794, 800, 794, 795]
drlReluMakespan35 = [794, 796, 794, 796, 806, 795, 795, 795, 796, 795, 795, 799]
drlReluMakespan36 = [795, 794, 794, 796, 796, 798, 794, 796, 794, 795, 794, 795]
drlReluMakespan37 = [795, 794, 795, 798, 794, 794, 794, 794, 794, 794, 795, 797]
drlReluMakespan38 = [794, 798, 794, 798, 797, 794, 794, 795, 795, 794, 795, 795]
drlReluMakespan39 = [797, 794, 795, 796, 796, 796, 798, 794, 794, 795, 794, 798]
drlReluMakespan40 = [798, 795, 795, 798, 792, 795, 795, 794, 795, 794, 798, 794]
drlReluMakespan41 = [795, 794, 794, 794, 794, 794, 798, 793, 794, 794, 794, 793]
drlReluMakespan42 = [794, 794, 794, 794, 799, 794, 795, 794, 796, 794, 794, 794]
drlReluMakespan43 = [794, 797, 795, 794, 795, 794, 794, 795, 794, 794, 793, 794]
drlReluMakespan44 = [794, 792, 793, 794, 794, 796, 794, 798, 795, 794, 794, 796]
drlReluMakespan45 = [795, 794, 799, 794, 794, 793, 794, 795, 795, 793, 796, 794]
drlReluMakespan46 = [794, 796, 794, 794, 794, 794, 794, 793, 799, 792, 794, 794]
drlReluMakespan47 = [795, 794, 793, 794, 796, 797, 794, 794, 795, 794, 794, 794]
drlReluMakespan48 = [794, 794, 794, 792, 794, 794, 795, 794, 794, 794, 794, 794]
drlReluMakespan49 = [794, 794, 795, 792, 797, 797, 794, 794, 792, 800, 795, 795]
drlReluRewards0 = [-0.17544633017412387, -0.17580964970257765, -0.1778021978021978, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards1 = [-0.17617264919621228, -0.17617264919621228, -0.1763540290620872, -0.17725973169122497, -0.1768976897689769, -0.17725973169122497, -0.1776214552648934, -0.17580964970257765, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17617264919621228]
drlReluRewards2 = [-0.177078750549934, -0.177078750549934, -0.17580964970257765, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765, -0.1763540290620872, -0.1759911894273128, -0.17617264919621228, -0.17725973169122497, -0.17617264919621228, -0.17617264919621228]
drlReluRewards3 = [-0.17580964970257765, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.1776214552648934, -0.17671654929577466, -0.17617264919621228, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228]
drlReluRewards4 = [-0.177078750549934, -0.177078750549934, -0.1759911894273128, -0.17544633017412387, -0.17580964970257765, -0.17671654929577466, -0.1759911894273128, -0.17617264919621228, -0.17617264919621228, -0.17617264919621228, -0.17526455026455026, -0.17508269018743108]
drlReluRewards5 = [-0.1759911894273128, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.1763540290620872, -0.17544633017412387, -0.17526455026455026, -0.17617264919621228, -0.17671654929577466, -0.17617264919621228, -0.17617264919621228, -0.177078750549934]
drlReluRewards6 = [-0.1759911894273128, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.177078750549934, -0.17544633017412387, -0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17526455026455026, -0.1768976897689769, -0.17544633017412387]
drlReluRewards7 = [-0.17526455026455026, -0.1759911894273128, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.177078750549934]
drlReluRewards8 = [-0.177078750549934, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17617264919621228, -0.17544633017412387, -0.17580964970257765]
drlReluRewards9 = [-0.17562802996914942, -0.17562802996914942, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17526455026455026, -0.1763540290620872, -0.1759911894273128]
drlReluRewards10 = [-0.1759911894273128, -0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17562802996914942, -0.1759911894273128]
drlReluRewards11 = [-0.17544633017412387, -0.17580964970257765, -0.17617264919621228, -0.17526455026455026, -0.177078750549934, -0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026]
drlReluRewards12 = [-0.17617264919621228, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17617264919621228, -0.177078750549934, -0.17617264919621228, -0.17580964970257765, -0.1768976897689769, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466]
drlReluRewards13 = [-0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128]
drlReluRewards14 = [-0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.177078750549934, -0.17580964970257765, -0.17580964970257765, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387]
drlReluRewards15 = [-0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.177078750549934, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387]
drlReluRewards16 = [-0.17580964970257765, -0.17526455026455026, -0.17544633017412387, -0.1759911894273128, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards17 = [-0.17508269018743108, -0.17580964970257765, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.1763540290620872, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17544633017412387, -0.1763540290620872]
drlReluRewards18 = [-0.17580964970257765, -0.17526455026455026, -0.17580964970257765, -0.17580964970257765, -0.1763540290620872, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17617264919621228]
drlReluRewards19 = [-0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17617264919621228, -0.17544633017412387, -0.17526455026455026, -0.17580964970257765, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387]
drlReluRewards20 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17580964970257765, -0.1759911894273128, -0.1759911894273128, -0.1759911894273128, -0.17526455026455026]
drlReluRewards21 = [-0.17653532907770195, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1768976897689769, -0.17671654929577466, -0.17562802996914942]
drlReluRewards22 = [-0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17671654929577466, -0.17526455026455026, -0.17508269018743108]
drlReluRewards23 = [-0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.1759911894273128, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17562802996914942, -0.1759911894273128]
drlReluRewards24 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17617264919621228, -0.17580964970257765]
drlReluRewards25 = [-0.17526455026455026, -0.17508269018743108, -0.17562802996914942, -0.17544633017412387, -0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards26 = [-0.1763540290620872, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17617264919621228, -0.17580964970257765, -0.17580964970257765, -0.1759911894273128, -0.17508269018743108, -0.17544633017412387]
drlReluRewards27 = [-0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards28 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026]
drlReluRewards29 = [-0.1749007498897221, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17580964970257765, -0.17544633017412387, -0.17526455026455026, -0.17508269018743108]
drlReluRewards30 = [-0.17653532907770195, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.177078750549934, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108]
drlReluRewards31 = [-0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17617264919621228, -0.17617264919621228, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765]
drlReluRewards32 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards33 = [-0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17562802996914942, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.1763540290620872, -0.1749007498897221]
drlReluRewards34 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17617264919621228, -0.17508269018743108, -0.17526455026455026]
drlReluRewards35 = [-0.17508269018743108, -0.17544633017412387, -0.17725973169122497, -0.17508269018743108, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.17526455026455026, -0.17544633017412387, -0.17526455026455026, -0.17526455026455026, -0.1759911894273128]
drlReluRewards36 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026]
drlReluRewards37 = [-0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17562802996914942]
drlReluRewards38 = [-0.17508269018743108, -0.17580964970257765, -0.17508269018743108, -0.17580964970257765, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026]
drlReluRewards39 = [-0.17562802996914942, -0.17508269018743108, -0.17526455026455026, -0.17544633017412387, -0.17544633017412387, -0.17544633017412387, -0.17580964970257765, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765]
drlReluRewards40 = [-0.17580964970257765, -0.17526455026455026, -0.17526455026455026, -0.17580964970257765, -0.17471872931833224, -0.17526455026455026, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17580964970257765, -0.17508269018743108]
drlReluRewards41 = [-0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17580964970257765, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221]
drlReluRewards42 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards43 = [-0.17508269018743108, -0.17562802996914942, -0.17526455026455026, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108]
drlReluRewards44 = [-0.17508269018743108, -0.17471872931833224, -0.1749007498897221, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17580964970257765, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17544633017412387]
drlReluRewards45 = [-0.17526455026455026, -0.17508269018743108, -0.1759911894273128, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17526455026455026, -0.17526455026455026, -0.1749007498897221, -0.17544633017412387, -0.17508269018743108]
drlReluRewards46 = [-0.17508269018743108, -0.17544633017412387, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.1749007498897221, -0.1759911894273128, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108]
drlReluRewards47 = [-0.17526455026455026, -0.17508269018743108, -0.1749007498897221, -0.17508269018743108, -0.17544633017412387, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards48 = [-0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108, -0.17508269018743108]
drlReluRewards49 = [-0.17508269018743108, -0.17508269018743108, -0.17526455026455026, -0.17471872931833224, -0.17562802996914942, -0.17562802996914942, -0.17508269018743108, -0.17508269018743108, -0.17471872931833224, -0.17617264919621228, -0.17526455026455026, -0.17526455026455026]
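# ---------------------------------------------------------------------------
# Illustrative helper (a sketch only; the aggregation code in the main block
# below is kept as written and does not use it). That block repeats
# "<result>.append(np.mean(<runN>))" once per run. Assuming the per-run lists
# follow the "<prefix><index>" naming convention used throughout this file
# (e.g. drlReluMakespan0 .. drlReluMakespan49) and that numpy is imported as
# np at the top of the script, the same per-run means could be collected as:
def collect_run_means(prefix, count=50, namespace=None):
    # Gather the per-run lists from the given namespace (module globals by
    # default) and return them together with their per-run means.
    ns = namespace if namespace is not None else globals()
    runs = [ns[prefix + str(i)] for i in range(count)]
    means = [np.mean(run) for run in runs]
    return runs, means
# Hypothetical usage: drlReluRuns, drlReluMeans = collect_run_means("drlReluMakespan")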
if __name__ == "__main__":
##############################################
##############################################
##############################################
# Deep Recurrent Reinforcement Learning with 1 GRU layer and 4 Dense layers
drnnGRUtanhMakespan = []
drnnGRUtanhRewards = []
drnnGRUtanhMakespanList = []
drnnGRUtanhRewardsList = []
drnnGRUtanhMakespanValues = []
drnnGRUtanhRewardsValues = []
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan0))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan1))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan2))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan3))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan4))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan5))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan6))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan7))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan8))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan9))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan10))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan11))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan12))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan13))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan14))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan15))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan16))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan17))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan18))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan19))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan20))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan21))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan22))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan23))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan24))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan25))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan26))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan27))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan28))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan29))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan30))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan31))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan32))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan33))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan34))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan35))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan36))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan37))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan38))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan39))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan40))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan41))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan42))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan43))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan44))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan45))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan46))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan47))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan48))
drnnGRUtanhMakespan.append(np.mean(drnnGRUtanhMakespan49))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards0))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards1))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards2))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards3))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards4))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards5))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards6))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards7))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards8))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards9))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards10))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards11))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards12))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards13))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards14))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards15))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards16))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards17))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards18))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards19))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards20))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards21))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards22))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards23))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards24))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards25))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards26))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards27))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards28))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards29))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards30))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards31))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards32))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards33))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards34))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards35))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards36))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards37))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards38))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards39))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards40))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards41))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards42))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards43))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards44))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards45))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards46))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards47))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards48))
drnnGRUtanhRewards.append(np.mean(drnnGRUtanhRewards49))
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan0)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan1)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan2)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan3)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan4)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan5)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan6)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan7)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan8)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan9)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan10)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan11)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan12)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan13)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan14)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan15)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan16)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan17)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan18)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan19)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan20)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan21)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan22)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan23)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan24)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan25)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan26)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan27)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan28)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan29)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan30)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan31)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan32)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan33)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan34)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan35)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan36)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan37)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan38)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan39)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan40)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan41)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan42)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan43)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan44)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan45)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan46)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan47)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan48)
drnnGRUtanhMakespanList.append(drnnGRUtanhMakespan49)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards0)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards1)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards2)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards3)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards4)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards5)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards6)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards7)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards8)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards9)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards10)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards11)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards12)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards13)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards14)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards15)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards16)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards17)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards18)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards19)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards20)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards21)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards22)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards23)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards24)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards25)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards26)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards27)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards28)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards29)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards30)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards31)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards32)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards33)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards34)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards35)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards36)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards37)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards38)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards39)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards40)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards41)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards42)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards43)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards44)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards45)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards46)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards47)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards48)
drnnGRUtanhRewardsList.append(drnnGRUtanhRewards49)
drnnGRUreluMakespan = []
drnnGRUreluRewards = []
drnnGRUreluMakespanList = []
drnnGRUreluRewardsList = []
drnnGRUreluMakespanValues = []
drnnGRUreluRewardsValues = []
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan0))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan1))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan2))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan3))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan4))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan5))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan6))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan7))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan8))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan9))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan10))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan11))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan12))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan13))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan14))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan15))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan16))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan17))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan18))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan19))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan20))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan21))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan22))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan23))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan24))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan25))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan26))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan27))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan28))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan29))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan30))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan31))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan32))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan33))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan34))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan35))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan36))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan37))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan38))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan39))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan40))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan41))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan42))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan43))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan44))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan45))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan46))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan47))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan48))
drnnGRUreluMakespan.append(np.mean(drnnGRUreluMakespan49))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards0))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards1))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards2))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards3))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards4))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards5))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards6))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards7))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards8))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards9))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards10))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards11))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards12))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards13))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards14))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards15))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards16))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards17))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards18))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards19))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards20))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards21))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards22))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards23))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards24))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards25))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards26))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards27))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards28))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards29))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards30))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards31))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards32))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards33))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards34))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards35))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards36))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards37))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards38))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards39))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards40))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards41))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards42))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards43))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards44))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards45))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards46))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards47))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards48))
drnnGRUreluRewards.append(np.mean(drnnGRUreluRewards49))
drnnGRUreluMakespanList.append(drnnGRUreluMakespan0)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan1)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan2)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan3)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan4)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan5)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan6)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan7)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan8)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan9)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan10)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan11)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan12)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan13)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan14)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan15)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan16)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan17)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan18)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan19)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan20)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan21)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan22)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan23)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan24)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan25)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan26)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan27)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan28)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan29)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan30)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan31)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan32)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan33)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan34)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan35)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan36)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan37)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan38)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan39)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan40)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan41)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan42)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan43)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan44)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan45)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan46)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan47)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan48)
drnnGRUreluMakespanList.append(drnnGRUreluMakespan49)
drnnGRUreluRewardsList.append(drnnGRUreluRewards0)
drnnGRUreluRewardsList.append(drnnGRUreluRewards1)
drnnGRUreluRewardsList.append(drnnGRUreluRewards2)
drnnGRUreluRewardsList.append(drnnGRUreluRewards3)
drnnGRUreluRewardsList.append(drnnGRUreluRewards4)
drnnGRUreluRewardsList.append(drnnGRUreluRewards5)
drnnGRUreluRewardsList.append(drnnGRUreluRewards6)
drnnGRUreluRewardsList.append(drnnGRUreluRewards7)
drnnGRUreluRewardsList.append(drnnGRUreluRewards8)
drnnGRUreluRewardsList.append(drnnGRUreluRewards9)
drnnGRUreluRewardsList.append(drnnGRUreluRewards10)
drnnGRUreluRewardsList.append(drnnGRUreluRewards11)
drnnGRUreluRewardsList.append(drnnGRUreluRewards12)
drnnGRUreluRewardsList.append(drnnGRUreluRewards13)
drnnGRUreluRewardsList.append(drnnGRUreluRewards14)
drnnGRUreluRewardsList.append(drnnGRUreluRewards15)
drnnGRUreluRewardsList.append(drnnGRUreluRewards16)
drnnGRUreluRewardsList.append(drnnGRUreluRewards17)
drnnGRUreluRewardsList.append(drnnGRUreluRewards18)
drnnGRUreluRewardsList.append(drnnGRUreluRewards19)
drnnGRUreluRewardsList.append(drnnGRUreluRewards20)
drnnGRUreluRewardsList.append(drnnGRUreluRewards21)
drnnGRUreluRewardsList.append(drnnGRUreluRewards22)
drnnGRUreluRewardsList.append(drnnGRUreluRewards23)
drnnGRUreluRewardsList.append(drnnGRUreluRewards24)
drnnGRUreluRewardsList.append(drnnGRUreluRewards25)
drnnGRUreluRewardsList.append(drnnGRUreluRewards26)
drnnGRUreluRewardsList.append(drnnGRUreluRewards27)
drnnGRUreluRewardsList.append(drnnGRUreluRewards28)
drnnGRUreluRewardsList.append(drnnGRUreluRewards29)
drnnGRUreluRewardsList.append(drnnGRUreluRewards30)
drnnGRUreluRewardsList.append(drnnGRUreluRewards31)
drnnGRUreluRewardsList.append(drnnGRUreluRewards32)
drnnGRUreluRewardsList.append(drnnGRUreluRewards33)
drnnGRUreluRewardsList.append(drnnGRUreluRewards34)
drnnGRUreluRewardsList.append(drnnGRUreluRewards35)
drnnGRUreluRewardsList.append(drnnGRUreluRewards36)
drnnGRUreluRewardsList.append(drnnGRUreluRewards37)
drnnGRUreluRewardsList.append(drnnGRUreluRewards38)
drnnGRUreluRewardsList.append(drnnGRUreluRewards39)
drnnGRUreluRewardsList.append(drnnGRUreluRewards40)
drnnGRUreluRewardsList.append(drnnGRUreluRewards41)
drnnGRUreluRewardsList.append(drnnGRUreluRewards42)
drnnGRUreluRewardsList.append(drnnGRUreluRewards43)
drnnGRUreluRewardsList.append(drnnGRUreluRewards44)
drnnGRUreluRewardsList.append(drnnGRUreluRewards45)
drnnGRUreluRewardsList.append(drnnGRUreluRewards46)
drnnGRUreluRewardsList.append(drnnGRUreluRewards47)
drnnGRUreluRewardsList.append(drnnGRUreluRewards48)
drnnGRUreluRewardsList.append(drnnGRUreluRewards49)
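# The nested loops below flatten the 50 per-run lists (12 episodes each) into
# a single chronological sequence per configuration, so the whole training
# history can be smoothed and plotted as one curve.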
for vector in drnnGRUtanhMakespanList:
for element in vector:
drnnGRUtanhMakespanValues.append(element)
for vector in drnnGRUtanhRewardsList:
for element in vector:
drnnGRUtanhRewardsValues.append(element)
##################
for vector in drnnGRUreluMakespanList:
for element in vector:
drnnGRUreluMakespanValues.append(element)
for vector in drnnGRUreluRewardsList:
for element in vector:
drnnGRUreluRewardsValues.append(element)
#####################
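# Smoothing: pd.Series(...).rolling(12).mean() takes a 12-sample moving
# average (one window per 12-episode run); the first 11 points are NaN and
# are simply skipped by matplotlib when plotted.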
smoothGRUtanhMakespanValues = pd.Series(drnnGRUtanhMakespanValues).rolling(12).mean()
plt.plot(smoothGRUtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
smoothGRUtanhRewardsValues = pd.Series(drnnGRUtanhRewardsValues).rolling(12).mean()
plt.plot(smoothGRUtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.show()
#####################
smoothGRUreluMakespanValues = pd.Series(drnnGRUreluMakespanValues).rolling(12).mean()
plt.plot(smoothGRUreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
smoothGRUreluRewardsValues = pd.Series(drnnGRUreluRewardsValues).rolling(12).mean()
plt.plot(smoothGRUreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU y ReLU")
plt.show()
###################
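# Overlay comparison: smoothed GRU makespan (and, further below, reward)
# curves for the tanh and ReLU configurations on a single axis.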
plt.plot(smoothGRUtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothGRUreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
plt.plot(smoothGRUtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothGRUreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa GRU")
plt.legend()
plt.show()
###################
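# The same aggregate / flatten / smooth / plot pipeline is now repeated for
# the LSTM(tanh) configuration.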
drnnLSTMtanhMakespan = []
drnnLSTMtanhRewards = []
drnnLSTMtanhMakespanList = []
drnnLSTMtanhRewardsList = []
drnnLSTMtanhMakespanValues = []
drnnLSTMtanhRewardsValues = []
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan0))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan1))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan2))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan3))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan4))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan5))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan6))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan7))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan8))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan9))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan10))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan11))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan12))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan13))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan14))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan15))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan16))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan17))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan18))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan19))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan20))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan21))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan22))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan23))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan24))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan25))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan26))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan27))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan28))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan29))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan30))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan31))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan32))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan33))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan34))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan35))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan36))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan37))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan38))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan39))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan40))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan41))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan42))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan43))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan44))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan45))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan46))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan47))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan48))
drnnLSTMtanhMakespan.append(np.mean(drnnLSTMtanhMakespan49))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards0))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards1))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards2))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards3))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards4))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards5))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards6))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards7))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards8))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards9))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards10))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards11))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards12))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards13))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards14))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards15))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards16))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards17))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards18))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards19))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards20))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards21))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards22))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards23))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards24))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards25))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards26))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards27))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards28))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards29))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards30))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards31))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards32))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards33))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards34))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards35))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards36))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards37))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards38))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards39))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards40))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards41))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards42))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards43))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards44))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards45))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards46))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards47))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards48))
drnnLSTMtanhRewards.append(np.mean(drnnLSTMtanhRewards49))
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan0)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan1)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan2)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan3)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan4)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan5)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan6)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan7)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan8)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan9)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan10)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan11)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan12)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan13)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan14)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan15)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan16)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan17)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan18)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan19)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan20)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan21)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan22)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan23)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan24)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan25)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan26)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan27)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan28)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan29)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan30)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan31)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan32)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan33)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan34)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan35)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan36)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan37)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan38)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan39)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan40)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan41)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan42)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan43)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan44)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan45)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan46)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan47)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan48)
drnnLSTMtanhMakespanList.append(drnnLSTMtanhMakespan49)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards0)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards1)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards2)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards3)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards4)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards5)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards6)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards7)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards8)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards9)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards10)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards11)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards12)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards13)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards14)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards15)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards16)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards17)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards18)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards19)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards20)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards21)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards22)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards23)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards24)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards25)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards26)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards27)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards28)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards29)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards30)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards31)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards32)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards33)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards34)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards35)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards36)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards37)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards38)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards39)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards40)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards41)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards42)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards43)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards44)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards45)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards46)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards47)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards48)
drnnLSTMtanhRewardsList.append(drnnLSTMtanhRewards49)
for vector in drnnLSTMtanhMakespanList:
for element in vector:
drnnLSTMtanhMakespanValues.append(element)
for vector in drnnLSTMtanhRewardsList:
for element in vector:
drnnLSTMtanhRewardsValues.append(element)
smoothLSTMtanhMakespanValues = pd.Series(drnnLSTMtanhMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMtanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con tanh")
plt.show()
smoothLSTMtanhRewardsValues = pd.Series(drnnLSTMtanhRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMtanhRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con tanh")
plt.show()
####################
drnnLSTMreluMakespan = []
drnnLSTMreluRewards = []
drnnLSTMreluMakespanList = []
drnnLSTMreluRewardsList = []
drnnLSTMreluMakespanValues = []
drnnLSTMreluRewardsValues = []
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan0))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan1))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan2))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan3))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan4))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan5))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan6))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan7))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan8))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan9))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan10))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan11))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan12))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan13))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan14))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan15))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan16))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan17))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan18))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan19))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan20))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan21))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan22))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan23))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan24))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan25))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan26))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan27))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan28))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan29))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan30))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan31))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan32))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan33))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan34))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan35))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan36))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan37))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan38))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan39))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan40))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan41))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan42))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan43))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan44))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan45))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan46))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan47))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan48))
drnnLSTMreluMakespan.append(np.mean(drnnLSTMreluMakespan49))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards0))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards1))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards2))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards3))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards4))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards5))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards6))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards7))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards8))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards9))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards10))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards11))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards12))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards13))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards14))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards15))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards16))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards17))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards18))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards19))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards20))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards21))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards22))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards23))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards24))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards25))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards26))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards27))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards28))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards29))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards30))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards31))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards32))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards33))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards34))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards35))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards36))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards37))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards38))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards39))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards40))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards41))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards42))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards43))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards44))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards45))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards46))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards47))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards48))
drnnLSTMreluRewards.append(np.mean(drnnLSTMreluRewards49))
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan0)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan1)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan2)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan3)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan4)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan5)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan6)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan7)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan8)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan9)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan10)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan11)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan12)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan13)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan14)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan15)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan16)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan17)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan18)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan19)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan20)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan21)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan22)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan23)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan24)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan25)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan26)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan27)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan28)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan29)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan30)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan31)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan32)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan33)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan34)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan35)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan36)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan37)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan38)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan39)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan40)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan41)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan42)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan43)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan44)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan45)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan46)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan47)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan48)
drnnLSTMreluMakespanList.append(drnnLSTMreluMakespan49)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards0)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards1)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards2)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards3)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards4)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards5)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards6)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards7)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards8)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards9)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards10)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards11)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards12)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards13)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards14)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards15)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards16)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards17)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards18)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards19)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards20)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards21)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards22)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards23)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards24)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards25)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards26)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards27)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards28)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards29)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards30)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards31)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards32)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards33)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards34)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards35)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards36)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards37)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards38)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards39)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards40)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards41)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards42)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards43)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards44)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards45)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards46)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards47)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards48)
drnnLSTMreluRewardsList.append(drnnLSTMreluRewards49)
for vector in drnnLSTMreluMakespanList:
for element in vector:
drnnLSTMreluMakespanValues.append(element)
for vector in drnnLSTMreluRewardsList:
for element in vector:
drnnLSTMreluRewardsValues.append(element)
smoothLSTMreluMakespanValues = pd.Series(drnnLSTMreluMakespanValues).rolling(12).mean()
plt.plot(smoothLSTMreluMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando LSTM con relu")
plt.show()
smoothLSTMreluRewardsValues = pd.Series(drnnLSTMreluRewardsValues).rolling(12).mean()
plt.plot(smoothLSTMreluRewardsValues)
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' utilizando LSTM con relu")
plt.show()
##################
plt.plot(smoothLSTMtanhMakespanValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluMakespanValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
plt.plot(smoothLSTMtanhRewardsValues, color='blue', label='tanh')
plt.plot(smoothLSTMreluRewardsValues, color='orange', label='relu')
plt.xlabel("Episodios")
plt.ylabel("Premio")
plt.title("'Reward' con red neuronal profunda que incluye 1 capa LSTM")
plt.legend()
plt.show()
##################
##################
##################
drlTanhMakespan = []
drlTanhRewards = []
drlTanhMakespanList = []
drlTanhRewardsList = []
drlTanhMakespanValues = []
drlTanhRewardsValues = []
drlTanhMakespan.append(np.mean(drlTanhMakespan0))
drlTanhMakespan.append(np.mean(drlTanhMakespan1))
drlTanhMakespan.append(np.mean(drlTanhMakespan2))
drlTanhMakespan.append(np.mean(drlTanhMakespan3))
drlTanhMakespan.append(np.mean(drlTanhMakespan4))
drlTanhMakespan.append(np.mean(drlTanhMakespan5))
drlTanhMakespan.append(np.mean(drlTanhMakespan6))
drlTanhMakespan.append(np.mean(drlTanhMakespan7))
drlTanhMakespan.append(np.mean(drlTanhMakespan8))
drlTanhMakespan.append(np.mean(drlTanhMakespan9))
drlTanhMakespan.append(np.mean(drlTanhMakespan10))
drlTanhMakespan.append(np.mean(drlTanhMakespan11))
drlTanhMakespan.append(np.mean(drlTanhMakespan12))
drlTanhMakespan.append(np.mean(drlTanhMakespan13))
drlTanhMakespan.append(np.mean(drlTanhMakespan14))
drlTanhMakespan.append(np.mean(drlTanhMakespan15))
drlTanhMakespan.append(np.mean(drlTanhMakespan16))
drlTanhMakespan.append(np.mean(drlTanhMakespan17))
drlTanhMakespan.append(np.mean(drlTanhMakespan18))
drlTanhMakespan.append(np.mean(drlTanhMakespan19))
drlTanhMakespan.append(np.mean(drlTanhMakespan20))
drlTanhMakespan.append(np.mean(drlTanhMakespan21))
drlTanhMakespan.append(np.mean(drlTanhMakespan22))
drlTanhMakespan.append(np.mean(drlTanhMakespan23))
drlTanhMakespan.append(np.mean(drlTanhMakespan24))
drlTanhMakespan.append(np.mean(drlTanhMakespan25))
drlTanhMakespan.append(np.mean(drlTanhMakespan26))
drlTanhMakespan.append(np.mean(drlTanhMakespan27))
drlTanhMakespan.append(np.mean(drlTanhMakespan28))
drlTanhMakespan.append(np.mean(drlTanhMakespan29))
drlTanhMakespan.append(np.mean(drlTanhMakespan30))
drlTanhMakespan.append(np.mean(drlTanhMakespan31))
drlTanhMakespan.append(np.mean(drlTanhMakespan32))
drlTanhMakespan.append(np.mean(drlTanhMakespan33))
drlTanhMakespan.append(np.mean(drlTanhMakespan34))
drlTanhMakespan.append(np.mean(drlTanhMakespan35))
drlTanhMakespan.append(np.mean(drlTanhMakespan36))
drlTanhMakespan.append(np.mean(drlTanhMakespan37))
drlTanhMakespan.append(np.mean(drlTanhMakespan38))
drlTanhMakespan.append(np.mean(drlTanhMakespan39))
drlTanhMakespan.append(np.mean(drlTanhMakespan40))
drlTanhMakespan.append(np.mean(drlTanhMakespan41))
drlTanhMakespan.append(np.mean(drlTanhMakespan42))
drlTanhMakespan.append(np.mean(drlTanhMakespan43))
drlTanhMakespan.append(np.mean(drlTanhMakespan44))
drlTanhMakespan.append(np.mean(drlTanhMakespan45))
drlTanhMakespan.append(np.mean(drlTanhMakespan46))
drlTanhMakespan.append(np.mean(drlTanhMakespan47))
drlTanhMakespan.append(np.mean(drlTanhMakespan48))
drlTanhMakespan.append(np.mean(drlTanhMakespan49))
drlTanhRewards.append(np.mean(drlTanhRewards0))
drlTanhRewards.append(np.mean(drlTanhRewards1))
drlTanhRewards.append(np.mean(drlTanhRewards2))
drlTanhRewards.append(np.mean(drlTanhRewards3))
drlTanhRewards.append(np.mean(drlTanhRewards4))
drlTanhRewards.append(np.mean(drlTanhRewards5))
drlTanhRewards.append(np.mean(drlTanhRewards6))
drlTanhRewards.append(np.mean(drlTanhRewards7))
drlTanhRewards.append(np.mean(drlTanhRewards8))
drlTanhRewards.append(np.mean(drlTanhRewards9))
drlTanhRewards.append(np.mean(drlTanhRewards10))
drlTanhRewards.append(np.mean(drlTanhRewards11))
drlTanhRewards.append(np.mean(drlTanhRewards12))
drlTanhRewards.append(np.mean(drlTanhRewards13))
drlTanhRewards.append(np.mean(drlTanhRewards14))
drlTanhRewards.append(np.mean(drlTanhRewards15))
drlTanhRewards.append(np.mean(drlTanhRewards16))
drlTanhRewards.append(np.mean(drlTanhRewards17))
drlTanhRewards.append(np.mean(drlTanhRewards18))
drlTanhRewards.append(np.mean(drlTanhRewards19))
drlTanhRewards.append(np.mean(drlTanhRewards20))
drlTanhRewards.append(np.mean(drlTanhRewards21))
drlTanhRewards.append(np.mean(drlTanhRewards22))
drlTanhRewards.append(np.mean(drlTanhRewards23))
drlTanhRewards.append(np.mean(drlTanhRewards24))
drlTanhRewards.append(np.mean(drlTanhRewards25))
drlTanhRewards.append(np.mean(drlTanhRewards26))
drlTanhRewards.append(np.mean(drlTanhRewards27))
drlTanhRewards.append(np.mean(drlTanhRewards28))
drlTanhRewards.append(np.mean(drlTanhRewards29))
drlTanhRewards.append(np.mean(drlTanhRewards30))
drlTanhRewards.append(np.mean(drlTanhRewards31))
drlTanhRewards.append(np.mean(drlTanhRewards32))
drlTanhRewards.append(np.mean(drlTanhRewards33))
drlTanhRewards.append(np.mean(drlTanhRewards34))
drlTanhRewards.append(np.mean(drlTanhRewards35))
drlTanhRewards.append(np.mean(drlTanhRewards36))
drlTanhRewards.append(np.mean(drlTanhRewards37))
drlTanhRewards.append(np.mean(drlTanhRewards38))
drlTanhRewards.append(np.mean(drlTanhRewards39))
drlTanhRewards.append(np.mean(drlTanhRewards40))
drlTanhRewards.append(np.mean(drlTanhRewards41))
drlTanhRewards.append(np.mean(drlTanhRewards42))
drlTanhRewards.append(np.mean(drlTanhRewards43))
drlTanhRewards.append(np.mean(drlTanhRewards44))
drlTanhRewards.append(np.mean(drlTanhRewards45))
drlTanhRewards.append(np.mean(drlTanhRewards46))
drlTanhRewards.append(np.mean(drlTanhRewards47))
drlTanhRewards.append(np.mean(drlTanhRewards48))
drlTanhRewards.append(np.mean(drlTanhRewards49))
drlTanhMakespanList.append(drlTanhMakespan0)
drlTanhMakespanList.append(drlTanhMakespan1)
drlTanhMakespanList.append(drlTanhMakespan2)
drlTanhMakespanList.append(drlTanhMakespan3)
drlTanhMakespanList.append(drlTanhMakespan4)
drlTanhMakespanList.append(drlTanhMakespan5)
drlTanhMakespanList.append(drlTanhMakespan6)
drlTanhMakespanList.append(drlTanhMakespan7)
drlTanhMakespanList.append(drlTanhMakespan8)
drlTanhMakespanList.append(drlTanhMakespan9)
drlTanhMakespanList.append(drlTanhMakespan10)
drlTanhMakespanList.append(drlTanhMakespan11)
drlTanhMakespanList.append(drlTanhMakespan12)
drlTanhMakespanList.append(drlTanhMakespan13)
drlTanhMakespanList.append(drlTanhMakespan14)
drlTanhMakespanList.append(drlTanhMakespan15)
drlTanhMakespanList.append(drlTanhMakespan16)
drlTanhMakespanList.append(drlTanhMakespan17)
drlTanhMakespanList.append(drlTanhMakespan18)
drlTanhMakespanList.append(drlTanhMakespan19)
drlTanhMakespanList.append(drlTanhMakespan20)
drlTanhMakespanList.append(drlTanhMakespan21)
drlTanhMakespanList.append(drlTanhMakespan22)
drlTanhMakespanList.append(drlTanhMakespan23)
drlTanhMakespanList.append(drlTanhMakespan24)
drlTanhMakespanList.append(drlTanhMakespan25)
drlTanhMakespanList.append(drlTanhMakespan26)
drlTanhMakespanList.append(drlTanhMakespan27)
drlTanhMakespanList.append(drlTanhMakespan28)
drlTanhMakespanList.append(drlTanhMakespan29)
drlTanhMakespanList.append(drlTanhMakespan30)
drlTanhMakespanList.append(drlTanhMakespan31)
drlTanhMakespanList.append(drlTanhMakespan32)
drlTanhMakespanList.append(drlTanhMakespan33)
drlTanhMakespanList.append(drlTanhMakespan34)
drlTanhMakespanList.append(drlTanhMakespan35)
drlTanhMakespanList.append(drlTanhMakespan36)
drlTanhMakespanList.append(drlTanhMakespan37)
drlTanhMakespanList.append(drlTanhMakespan38)
drlTanhMakespanList.append(drlTanhMakespan39)
drlTanhMakespanList.append(drlTanhMakespan40)
drlTanhMakespanList.append(drlTanhMakespan41)
drlTanhMakespanList.append(drlTanhMakespan42)
drlTanhMakespanList.append(drlTanhMakespan43)
drlTanhMakespanList.append(drlTanhMakespan44)
drlTanhMakespanList.append(drlTanhMakespan45)
drlTanhMakespanList.append(drlTanhMakespan46)
drlTanhMakespanList.append(drlTanhMakespan47)
drlTanhMakespanList.append(drlTanhMakespan48)
drlTanhMakespanList.append(drlTanhMakespan49)
drlTanhRewardsList.append(drlTanhRewards0)
drlTanhRewardsList.append(drlTanhRewards1)
drlTanhRewardsList.append(drlTanhRewards2)
drlTanhRewardsList.append(drlTanhRewards3)
drlTanhRewardsList.append(drlTanhRewards4)
drlTanhRewardsList.append(drlTanhRewards5)
drlTanhRewardsList.append(drlTanhRewards6)
drlTanhRewardsList.append(drlTanhRewards7)
drlTanhRewardsList.append(drlTanhRewards8)
drlTanhRewardsList.append(drlTanhRewards9)
drlTanhRewardsList.append(drlTanhRewards10)
drlTanhRewardsList.append(drlTanhRewards11)
drlTanhRewardsList.append(drlTanhRewards12)
drlTanhRewardsList.append(drlTanhRewards13)
drlTanhRewardsList.append(drlTanhRewards14)
drlTanhRewardsList.append(drlTanhRewards15)
drlTanhRewardsList.append(drlTanhRewards16)
drlTanhRewardsList.append(drlTanhRewards17)
drlTanhRewardsList.append(drlTanhRewards18)
drlTanhRewardsList.append(drlTanhRewards19)
drlTanhRewardsList.append(drlTanhRewards20)
drlTanhRewardsList.append(drlTanhRewards21)
drlTanhRewardsList.append(drlTanhRewards22)
drlTanhRewardsList.append(drlTanhRewards23)
drlTanhRewardsList.append(drlTanhRewards24)
drlTanhRewardsList.append(drlTanhRewards25)
drlTanhRewardsList.append(drlTanhRewards26)
drlTanhRewardsList.append(drlTanhRewards27)
drlTanhRewardsList.append(drlTanhRewards28)
drlTanhRewardsList.append(drlTanhRewards29)
drlTanhRewardsList.append(drlTanhRewards30)
drlTanhRewardsList.append(drlTanhRewards31)
drlTanhRewardsList.append(drlTanhRewards32)
drlTanhRewardsList.append(drlTanhRewards33)
drlTanhRewardsList.append(drlTanhRewards34)
drlTanhRewardsList.append(drlTanhRewards35)
drlTanhRewardsList.append(drlTanhRewards36)
drlTanhRewardsList.append(drlTanhRewards37)
drlTanhRewardsList.append(drlTanhRewards38)
drlTanhRewardsList.append(drlTanhRewards39)
drlTanhRewardsList.append(drlTanhRewards40)
drlTanhRewardsList.append(drlTanhRewards41)
drlTanhRewardsList.append(drlTanhRewards42)
drlTanhRewardsList.append(drlTanhRewards43)
drlTanhRewardsList.append(drlTanhRewards44)
drlTanhRewardsList.append(drlTanhRewards45)
drlTanhRewardsList.append(drlTanhRewards46)
drlTanhRewardsList.append(drlTanhRewards47)
drlTanhRewardsList.append(drlTanhRewards48)
drlTanhRewardsList.append(drlTanhRewards49)
for vector in drlTanhMakespanList:
for element in vector:
drlTanhMakespanValues.append(element)
for vector in drlTanhRewardsList:
for element in vector:
drlTanhRewardsValues.append(element)
smoothdrlTanhMakespanValues = pd.Series(drlTanhMakespanValues).rolling(12).mean()
plt.plot(smoothdrlTanhMakespanValues)
plt.xlabel("Episodios")
plt.ylabel("Segundos")
plt.title("'Makespan' utilizando feedforward con tanh")
plt.show()
smoothdrlTanhRewardsValues = pd.Series(drlTanhRewardsValues).rolling(12).mean()
"""
.. module:: text_processing_methods
:synopsis: Holding processing classes!
.. moduleauthor:: <NAME>
"""
import os
import re
from abc import ABC, abstractmethod
from typing import List
import gensim
import pandas as pd
import numpy as np
from nltk.util import ngrams
from pattern.en import parse
from difflib import SequenceMatcher
from pycontractions import Contractions
from segtok.segmenter import split_single
from segtok.tokenizer import split_contractions
from segtok.tokenizer import word_tokenizer
from mtc.helpers.text2digits import Text2Digits
from mtc.settings import NLP_MODELS_PATH
from mtc.mtc_properties import STOP_WORDS
def TextProcessing(name, *args, **kwargs):
"""
All text processing classes should be called via this method
"""
for cls in TextProcessingBaseClass.__subclasses__():
if cls.__name__ == name:
return cls(*args, **kwargs)
raise ValueError('No text processing named %s' % name)
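# Illustrative use of the factory above (assumed call, not part of the original pipeline): the name
# must match one of the TextProcessingBaseClass subclasses defined below, e.g.
#   expander = TextProcessing('ContractionExpander')
#   expander.preprocess(["I'll walk down the road"])  # -> ["I will walk down the road"]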
class TextProcessingBaseClass(ABC):
"""
Any text processing class must inherit from this class
"""
def preprocess(self, sentence_list: List):
if self.processing_type == 'corpus':
return self._process_internal(sentence_list)
else:
processed_texts = []
for text_element in sentence_list:
text_element = self._process_internal(text_element)
processed_texts.append(text_element)
return processed_texts
@property
def processing_type(self):
return 'sentence_level'
@abstractmethod
def level(self) -> str:
pass
@abstractmethod
def _process_internal(self, sentence_list: List[str]) -> List[str]:
"""Private method to process a piece of text"""
pass
class ContractionExpander(TextProcessingBaseClass):
'''
Removes contractions from the text and uses the full version instead (unification).
Example:
I'll walk down the road --> I will walk down the road
'''
model_contraction_expander = None
def __init__(self, model=None):
'''
:param model: Pretrained word embedding model.
'''
super().__init__()
if model is None:
# If no model is given, use the default one and store it as a static class variable to avoid multiple loadings
if ContractionExpander.model_contraction_expander is None:
model_path = os.path.join(NLP_MODELS_PATH, 'pretrained', 'word_embeddings', 'pubmed2018_w2v_400D',
'pubmed2018_w2v_400D.bin')
ContractionExpander.model_contraction_expander = gensim.models.KeyedVectors.load_word2vec_format(model_path, binary=True)
model = ContractionExpander.model_contraction_expander
self.cont = Contractions(kv_model=model)
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> str:
'''
:param text: Input string.
:return: The string without contractions.
'''
return list(self.cont.expand_texts([text], precise=True))[0]
class NumberUnifier(TextProcessingBaseClass):
'''
Unifies number representations in the text.
Example:
One capsule mouth two times day --> 1 capsule mouth 2 times day
'''
def __init__(self):
super().__init__()
self.t2n = Text2Digits()
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> str:
'''
:param text: Input string.
:return: The string without any numbers written-out.
'''
text = re.sub(r'(?:one)?-?half\b', '0.5', text)
text = re.sub(r'(\d)\s*(?:to|-)\s*(\d)', r'\1-\2', text)
text = self.t2n.convert(text)
return text
class SpellingCorrector(TextProcessingBaseClass):
'''
Tries to correct some common spelling errors specific to the sts training dataset.
Note that the case of the spell-corrected words is not preserved.
Example:
Pleast let me know --> Please let me know
'''
def __init__(self):
self.errors = {
'Pleast': 'please',
'Locaation': 'location',
'CURRENTand': 'current and',
'LocationEmergency': 'location emergency',
'refil': 'refill',
'EDUCATIONReady': 'education ready',
'd aily': 'daily'
}
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> str:
for error, correction in self.errors.items():
text = re.sub(r'\b' + error + r'\b', correction, text)
return text
class RemoveActiveIngredients(TextProcessingBaseClass):
'''
Removes active ingredients: i.e: aw [aber] -> aw
'''
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> str:
text = re.sub(r'\[([^]]+)\]', '', text)
#text = re.sub(r'\b' + error + r'\b', correction, text)
return text
class SentenceTokenizer(TextProcessingBaseClass):
'''
Tokenizes a piece of text into sentences. Note that this does also remove the whitespace between the sentences.
Example:
Hello there. How are you? --> [Hello there., How are you?]
'''
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> List[str]:
return split_single(text)
class WordTokenizer(TextProcessingBaseClass):
'''
Tokenizes sentences into words.
Example:
[Hello there., How are you?] --> [[Hello, there, .], [How, are, you, ?]]
'''
def level(self) -> str:
return "sentence"
def _process_internal(self, sentences: List[str]) -> List[List[str]]:
return [split_contractions(word_tokenizer(sen)) for sen in sentences]
class PunctuationRemover(TextProcessingBaseClass):
'''
Removes punctuation tokens.
Example:
[[Hello, there, .], [How, are, you, ?]] --> [[Hello, there], [How, are, you]]
'''
def level(self) -> str:
return "token"
def _is_punctuation(self, token):
if re.search(r'\d-\d', token):
# Keep number ranges
return False
elif re.search(r'\d\.', token) or re.search(r'\.\d', token):
# Keep numbers
return False
else:
return bool(re.search(r'[^a-zA-Z0-9äÄöÖüÜß\s]', token))
def _process_internal(self, tokenized_sentences: List[List[str]]) -> List[List[str]]:
sentences = []
for sen in tokenized_sentences:
words = [token for token in sen if not self._is_punctuation(token)]
if words:
sentences.append(words)
return sentences
class MedicationRemover(TextProcessingBaseClass):
    '''
    Removes medication annotations enclosed in square brackets (same pattern as RemoveActiveIngredients).
    Example:
        aw [aber] --> aw
    '''
def level(self) -> str:
return "text"
def _process_internal(self, text: str) -> str:
return re.sub(r'\[[^]]+\]', '', text)
class DrugsStandardize(TextProcessingBaseClass):
'''
Removes punctuation tokens.
Example:
[['lasix']] --> ['FUROSEMIDE']
'''
def level(self) -> str:
return "token"
def _process_internal(self, tokenized_sentences: List[List[str]]) -> List[List[str]]:
import mtc.drugstandards as drugs
sentences = []
for tokens in tokenized_sentences:
std_tokens = drugs.standardize(tokens, thresh=1)
std_sentence = []
for std_token, token in zip(std_tokens, tokens):
if std_token is None:
std_sentence.append(token)
else:
std_sentence.append(std_token)
sentences.append(std_sentence)
return sentences
class StopWordsRemover(TextProcessingBaseClass):
'''
Removes stop word tokens.
Example:
[[Hello, there], [How, are, you]] --> [[Hello]]
'''
def level(self) -> str:
return "token"
def _process_internal(self, tokenized_sentences: List[List[str]]) -> List[List[str]]:
sentences = []
for sen in tokenized_sentences:
words = [word for word in sen if word not in STOP_WORDS]
if words:
sentences.append(words)
return sentences
class LowerCaseTransformer(TextProcessingBaseClass):
'''
Transforms every string to lowercase.
Example:
[[Hello]] --> [[hello]]
'''
def level(self) -> str:
return "text or token"
def _process_internal(self, text):
if type(text) == str:
return text.lower()
else:
sentences = []
for sen in text:
words = [word.lower() for word in sen]
sentences.append(words)
return sentences
class Lemmatizer(TextProcessingBaseClass):
'''
Lemmatizes tokens, i.e. transforms every word to its base form.
Example:
[[obtains]] --> [[obtain]]
'''
def level(self) -> str:
return "token"
def _process_internal(self, tokenized_sentences: List[List[str]]) -> List[List[str]]:
sentences = []
for sen in tokenized_sentences:
# Unfortunately, the lemma() method does not yield the same result as parse (try e.g. with 'mice')
words = [parse(token, lemmata=True).split('/')[-1] for token in sen]
sentences.append(words)
return sentences
class RemoveLowHighFrequencyWords(TextProcessingBaseClass):
'''
Removes words of high or low frequency.
:param frequency: Number of occurrences.
:param low_frequency: Low or high frequency
'''
def __init__(self, frequency: int, low_frequency: str):
self.frequency = frequency
self.low_frequency = low_frequency
@property
def processing_type(self):
return 'corpus'
def level(self) -> str:
return "token"
def _process_internal(self, sentence_list: List) -> List:
        print('up to now did not help a lot')
new_sentence_list = []
all_tokens = []
for tokenized_sentences in sentence_list:
for sen in tokenized_sentences:
all_tokens = all_tokens + sen
        df_all_tokens = pd.DataFrame(all_tokens, columns=['tokens'])
import sys
import os
import csv
import math
import json
from datetime import datetime
import cv2
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PowerTransformer
from sklearn.decomposition import PCA
from sklearn.decomposition import FastICA
from sklearn.manifold import MDS
from sklearn.manifold import TSNE
from sklearn.cross_decomposition import PLSRegression
from sklearn.cluster import KMeans
################################
# clustering related functions #
################################
def label_groundTruthFixationMap(_gt, _x, _y):
if np.array_equal(_gt[_y][_x], np.array([0, 0, 0])):
return 0
else:
return 1
def generate_discrete_groundTruthFixationMap(_gt):
_dgtfmPath = "./static/__cache__/discrete_ground_truth_fixation_map.png"
gtCopy = _gt.copy()
mapHeight, mapWidth = gtCopy.shape[:2]
for i in range(0, len(gtCopy)):
for j in range(0, len(gtCopy[i])):
replaceArr = np.array([255, 255, 255])
if np.array_equal(gtCopy[i][j], np.array([0, 0, 0])):
replaceArr = np.array([0, 0, 0])
gtCopy[i][j] = replaceArr
    cv2.imwrite(_dgtfmPath, gtCopy)
def getFeatureMeanVal(_featDF, _x, _y, _stiWidth, _stiHeight, _patchSize):
meanVal = 0
min_x = int(_x - _patchSize/2)
max_x = int(_x + _patchSize/2)
if min_x < 0:
min_x = 0
if max_x > _stiWidth-1:
max_x = int(_stiWidth-1)
min_y = int(_y - _patchSize/2)
max_y = int(_y + _patchSize/2)
if min_y < 0:
min_y = 0
if max_y > _stiHeight-1:
max_y = int(_stiHeight-1)
featNP = _featDF.to_numpy()
# print("top: %d, bottom: %d, left: %d, right: %d"%(min_y, max_y, min_x, max_x))
patch = featNP[min_y:max_y, min_x:max_x]
# print(patch.shape)
meanVal = patch.mean()
return meanVal
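# Example call (assumed values, for illustration only): a fixation close to the border, e.g. (5, 5),
# gets its patch clipped to the valid image region before the feature values are averaged:
#   meanVal = getFeatureMeanVal(featureDF, 5, 5, fmWidth, fmHeight, PATCH_SIZE)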
def dataTransformation(tMethod, df, featureList):
# print("Data transformation method: "+tMethod)
if tMethod == "raw":
return df
elif tMethod == "min_max":
return dt_minMax(df, featureList)
elif tMethod == "z_score":
return dt_zScore(df, featureList)
elif tMethod == "yeo_johonson":
return dt_yeoJohnson(df, featureList)
elif tMethod == "yeo_johonson_min_max":
return dt_yeoJohnson_minMax(df, featureList)
else:
print("ERROR: unavailable data transformation method selected")
return df
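# Example of the dispatcher above (assumed DataFrame name): the first four columns are treated as
# metadata and copied through, while every listed feature column is rescaled, e.g.
#   tfDF = dataTransformation("min_max", aggregatedDF, FEATURE_ordered)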
def dt_minMax(df, featureList):
getColNames = df.columns
tfDF = df[[getColNames[0], getColNames[1], getColNames[2], getColNames[3]]]
colCount = 3
for featureName in featureList:
colCount = colCount+1
colFeatDF = df[featureName]
scaler = MinMaxScaler()
_tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))
tfDF.insert(colCount, featureName, _tf, True)
# tfDF[featureName] = _tf
return tfDF
def dt_zScore(df, featureList):
getColNames = df.columns
tfDF = df[[getColNames[0], getColNames[1], getColNames[2], getColNames[3]]]
colCount = 3
for featureName in featureList:
colCount = colCount+1
colFeatDF = df[featureName]
scaler = StandardScaler()
_tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))
tfDF.insert(colCount, featureName, _tf, True)
# tfDF[featureName] = _tf
return tfDF
def dt_yeoJohnson(df, featureList):
getColNames = df.columns
tfDF = df[[getColNames[0], getColNames[1], getColNames[2], getColNames[3]]]
colCount = 3
for featureName in featureList:
colCount = colCount+1
colFeatDF = df[featureName]
scaler = PowerTransformer(method='yeo-johnson')
_tf = scaler.fit_transform(colFeatDF.values.reshape(-1, 1))
tfDF.insert(colCount, featureName, _tf, True)
# tfDF[featureName] = _tf
return tfDF
def dt_yeoJohnson_minMax(df, featureList):
_df_1 = dt_yeoJohnson(df, featureList)
_df_2 = dt_minMax(_df_1, featureList)
return _df_2
def dataClustering(cMethod):
# print("Data clustering method: "+cMethod)
if cMethod == "random_forest":
return dc_randomForest()
elif cMethod == "dbscan":
return dc_dbscan()
elif cMethod == "hdbscan":
return dc_hdbscan()
elif cMethod == "k_means":
return dc_kMeans()
else:
print("ERROR: unavailable data clustering method selected")
def dc_randomForest():
return 0
def dc_dbscan():
return 0
def dc_hdbscan():
return 0
def dc_kMeans():
return 0
def dimensionReduction(drMethod, df, featureList):
# print("Dimension reduction method: "+drMethod)
if drMethod == "MDS":
return dr_MDS(df, featureList)
elif drMethod == "PCA":
return dr_PCA(df, featureList)
elif drMethod == "ICA":
return dr_ICA(df, featureList)
elif drMethod == "t_SNE":
return dr_TSNE(df, featureList)
elif drMethod == "PLS":
return dr_PLS(df, featureList)
else:
print("ERROR: unavailable dimension reduction method selected")
return df[['x', 'y']]
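# Example of the dimension-reduction dispatcher (assumed DataFrame holding the feature columns, plus
# a "label" column when PLS is chosen): the result is an (n_samples, 2) array of 2-D coordinates, e.g.
#   coords2d = dimensionReduction("PCA", aggregatedDF, FEATURE_ordered)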
def dr_MDS(df, featureList):
drm = MDS(n_components=2, random_state=0)
drDF = drm.fit_transform(df[featureList])
return drDF
def dr_PCA(df, featureList):
drm = PCA(n_components=2, random_state=0)
drDF = drm.fit_transform(df[featureList])
return drDF
def dr_ICA(df, featureList):
drm = FastICA(n_components=2, random_state=0)
drDF = drm.fit_transform(df[featureList])
return drDF
def dr_TSNE(df, featureList):
drm = TSNE(learning_rate=100, random_state=0)
drDF = drm.fit_transform(df[featureList])
return drDF
def dr_PLS(df, featureList):
drm = PLSRegression(n_components=2)
drDF, _ = drm.fit_transform(df[featureList], df["label"])
return drDF
FEATURE_ordered = ["intensity", "color", "orientation", "curvature", "center_bias", "entropy_rate", "log_spectrum", "HOG"]
DATA_TRANSFORMATION_LIST = ["min_max"]
DIMENSION_REDUCTION_LIST = ["MDS", "PCA"]
PATCH_SIZE = 20
FIX_DATA_DIR = "./fix/"
DATASET_LIST = os.listdir(FIX_DATA_DIR)
MID_FILE_GEN_PATH = ""
CACHE_FILE_GEN_PATH = ""
# midCacheFlag = True
ERROR_LOG = []
for dt in DATA_TRANSFORMATION_LIST:
for dr in DIMENSION_REDUCTION_LIST:
for dataName in DATASET_LIST:
path = FIX_DATA_DIR + dataName +"/"
STI_CLASS_LIST = os.listdir(path)
for stiClass in STI_CLASS_LIST:
path = FIX_DATA_DIR + dataName +"/"+ stiClass +"/"
STI_DIR_LIST = os.listdir(path)
for stiDirName in STI_DIR_LIST:
stiNameWithoutExe = ""
stiDirNameSplit = stiDirName.split("_")
if len(stiDirNameSplit) == 2:
stiNameWithoutExe = stiDirNameSplit[0]
else:
for i in range(0, len(stiDirNameSplit)):
if i==0:
stiNameWithoutExe = stiDirNameSplit[i]
elif i==len(stiDirNameSplit)-1:
break
else:
stiNameWithoutExe = stiNameWithoutExe +"_"+ stiDirNameSplit[i]
path = FIX_DATA_DIR + dataName +"/"+ stiClass +"/"+ stiDirName +"/"
FIX_FILE_LIST = os.listdir(path)
# MID_FILE_GEN_PATH = "./cache_mid/midcache-"+ dataName +"-"+ stiClass +"-"+ stiDirName +"-"+ dt +"-"+ dr +"-"+ str(len(FIX_FILE_LIST)) +".csv"
CACHE_FILE_GEN_PATH = "./cache/cache_"+ dataName +"-"+ stiClass +"-"+ stiDirName +"-"+ dt +"-"+ dr +"-"+ str(len(FIX_FILE_LIST)) +".csv"
if os.path.exists(CACHE_FILE_GEN_PATH):
print("CACHE: Already exists: "+CACHE_FILE_GEN_PATH)
continue
                    # midCacheFlag = True
# if os.path.exists(MID_FILE_GEN_PATH):
# print("MID: Already exists: "+MID_FILE_GEN_PATH)
# midCacheFlag = False
# break
print(CACHE_FILE_GEN_PATH)
# print("start time: "+datetime.today().strftime("%Y/%m/%d-%H:%M:%S"))
gtFixMapPath = "./ground_truth/" + dataName +"/"+ stiClass +"/"+ stiNameWithoutExe +".jpg"
groundTruthFixMap = cv2.imread(gtFixMapPath)
fmHeight, fmWidth = groundTruthFixMap.shape[:2]
loadedFeatureDataMatrix = []
for _f in FEATURE_ordered:
featureFilePath = "./feature/" + _f +"/"+ dataName +"/"+ stiClass +"/"+ stiNameWithoutExe +".csv"
featureDF = pd.read_csv(featureFilePath)
loadedFeatureDataMatrix.append(featureDF)
aggregatedDataList = []
for fixFileName in FIX_FILE_LIST:
path = FIX_DATA_DIR + dataName +"/"+ stiClass +"/"+ stiDirName +"/"+ fixFileName
                        df = pd.read_csv(path, header=None)
import numpy as np
from numpy import where
from flask import Flask, request, jsonify, render_template
import pandas as pd
from sklearn.ensemble import IsolationForest
from pyod.models.knn import KNN
import json
from flask import send_from_directory
from flask import current_app
app = Flask(__name__)
class Detect:
def __init__(self, file, non_num):
self.file = file
self.non_num = non_num
def IQR(self):
# anomaly=pd.DataFrame()
        data = pd.DataFrame(self.file)
        # Assumed completion (the original body is truncated here): flag rows falling outside
        # 1.5*IQR per numeric column, treating self.non_num as the columns to skip.
        num = data.drop(columns=self.non_num, errors='ignore')
        q1, q3 = num.quantile(0.25), num.quantile(0.75)
        iqr = q3 - q1
        mask = ((num < (q1 - 1.5 * iqr)) | (num > (q3 + 1.5 * iqr))).any(axis=1)
        return data[mask]
from collections import OrderedDict
import numpy as np
import pandas as pd
from .helpers import Interval
from .helpers import path_leaf
from .fasta import FastaReader
from Bio.Seq import Seq
from Bio.Alphabet import generic_dna
def counts_to_tpm(counts, sizes):
"""Counts to TPM
Parameters
----------
counts: array like
Series/array of counts
sizes: array like
Series/array of region sizes
"""
rate = np.log(counts).subtract(np.log(sizes))
denom = np.log(np.sum(np.exp(rate)))
tpm = np.exp(rate - denom + np.log(1e6))
return tpm
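# Worked example (toy numbers): counts [10, 20] over region sizes [1000, 2000] give equal per-base
# rates of 0.01, so each region receives half of the 1e6 total:
#   counts_to_tpm(pd.Series([10, 20]), pd.Series([1000, 2000]))  # -> [500000.0, 500000.0]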
def featurecounts_to_tpm(fc_f, outfile):
"""Convert htseq-counts file to tpm
Parameters
----------
fc_f: string
Path to htseq-count output
outfile: string
Path to output file with tpm values
"""
feature_counts = pd.read_csv(fc_f, sep="\t")
feature_counts = feature_counts.set_index("Geneid")
feature_counts = feature_counts.drop(columns=["Chr", "Start", "End", "Strand"])
lengths = feature_counts["Length"]
feature_counts = feature_counts.drop(columns=["Length"])
tpm = feature_counts.apply(lambda x: counts_to_tpm(x, lengths), axis=0)
tpm.columns = [
col.replace("bams_unique/", "").replace(".bam", "") for col in tpm.columns
]
tpm.to_csv(outfile, sep="\t", index=True, header=True)
def read_ribocounts_raw_count(filepath):
"""Read ribotricer counts as a dataframe"""
df = pd.read_csv(filepath, sep="\t")
df = df.sort_values(by=["gene_id"])
df = df.set_index("gene_id")
return df[["count"]]
def read_ribocounts_length_normalized_count(filepath):
"""Read ribotricer counts divided by length as a dataframe"""
df = pd.read_csv(filepath, sep="\t")
if "length" not in df.columns:
raise Exception("length column missing in input {}".format(filepath))
df["normalized_count"] = np.divide(1 + df["count"], df["length"])
df = df.sort_values(by=["gene_id"])
df = df.set_index("gene_id")
return df[["normalized_count"]]
def read_raw_counts_into_matrix(count_files):
"""Read ribotricer raw counts for multiple files in a dataframe"""
counts_df = pd.DataFrame()
for f in count_files:
filename = path_leaf(f)
filename = filename.replace("_counts_cnt.txt", "")
df = read_ribocounts_raw_count(f)
df.columns = [filename]
counts_df = counts_df.join(df, how="outer")
return counts_df
def ribotricer_index_to_fasta(ribotricer_index, fasta_file):
"""Convert ribotricer index to fasta.
Parameters
----------
ribotricer_index: str or pd.DataFrame
Path to ribotricer index file or read_csv version
fasta_file: str
Location of fasta
Returns
-------
sequences: list
list of list with orf_id and corresponding sequence
"""
fasta = FastaReader(fasta_file)
if isinstance(ribotricer_index, str):
ribotricer_index = pd.read_csv(ribotricer_index, sep="\t").set_index("ORF_ID")
sequences = []
for idx, row in ribotricer_index.iterrows():
intervals = []
for coordinate in row.coordinate.split(","):
start, stop = coordinate.split("-")
start = int(start)
stop = int(stop)
interval = Interval(row.chrom, start, stop)
intervals.append(interval)
sequence = fasta.query(intervals)
sequence = "".join(sequence)
if row.strand == "-":
sequence = fasta.reverse_complement(sequence)
sequences.append([idx, sequence])
return sequences
def get_translated_sequence(sequence):
"""Translate a sequence"""
translated_seq = str(Seq(sequence, generic_dna).translate())
return translated_seq
def get_translated_sequences_row(sequences):
"""Translate a sequence for a given row from ribotricer index df"""
for idx, row in enumerate(sequences):
orf_id, dna_seq = row
translated_seq = str(Seq(dna_seq, generic_dna).translate())
sequences[idx].append(translated_seq)
return sequences
def ribotricer_index_row_to_fasta(row, fasta_file):
"""Convert ribotricer index to fasta (just one row).
Parameters
----------
ribotricer_index: str or pd.DataFrame
Path to ribotricer index file or read_csv version
fasta_file: str
Location of fasta
Returns
-------
sequences: list
list of list with orf_id and corresponding sequence
"""
fasta = FastaReader(fasta_file)
# sequences = []
# idx = row.index
intervals = []
for coordinate in row.coordinate.split(","):
start, stop = coordinate.split("-")
start = int(start)
stop = int(stop)
interval = Interval(row.chrom, start, stop)
intervals.append(interval)
sequence = fasta.query(intervals)
sequence = "".join(sequence)
if row.strand == "-":
sequence = fasta.reverse_complement(sequence)
return sequence
def read_orf_lengths_into_matrix(count_files):
"""Read gene lengths into a matrix"""
lengths_dict = OrderedDict()
for f in count_files:
df = pd.read_csv(f, sep="\t").set_index("gene_id")
lengths = df[["length"]].to_dict()["length"]
for key, value in lengths.items():
if key not in lengths_dict:
lengths_dict[key] = value
lengths = pd.DataFrame.from_dict(lengths_dict, orient="index", columns=["length"])
lengths.index.name = "gene_id"
return lengths.sort_index()
def count_matrix_to_tpm(counts_matrix, gene_lengths):
"""Convert counts matrix to TPM matrix"""
gene_lengths = gene_lengths.loc[counts_matrix.index]
tpm_matrix = counts_matrix.copy()
if "gene_id" in tpm_matrix.columns:
tpm_matrix = tpm_matrix.set_index("gene_id")
tpm_matrix = tpm_matrix.fillna(0)
for col in tpm_matrix.columns:
tpm_matrix[col] = counts_to_tpm(tpm_matrix[col], gene_lengths["length"])
return tpm_matrix
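# Example pipeline (assumed list of ribotricer count files): build the raw-count matrix, collect the
# ORF lengths, then length-normalise the counts to TPM:
#   counts = read_raw_counts_into_matrix(count_files)
#   lengths = read_orf_lengths_into_matrix(count_files)
#   tpm = count_matrix_to_tpm(counts, lengths)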
def get_orf_hits(ribotricer_index, chrom, target_range):
max_intersection = 0
target_orfid = ""
chr_orfs = ribotricer_index.loc[ribotricer_index["chrom"] == chrom]
for idx, row in chr_orfs.iterrows():
coordinates = row.coordinate
coordinates = coordinates.split(",")
for coordinate in coordinates:
start, end = coordinate.split("-")
start = int(start)
end = int(end)
if start > target_range[1]:
continue
if end < target_range[0]:
continue
query_range = range(start, end + 1)
query_length = len(query_range)
intersection = set(query_range).intersection(target_range)
intersection_length = len(intersection)
if intersection_length > max_intersection:
max_intersection = intersection_length
target_orfid = row.ORF_ID
return max_intersection, target_orfid
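# Example (assumed coordinates): if an index row spans 100-200 on "chr1", the query range(150, 250)
# overlaps it on 51 positions, so that ORF would be reported as the best hit:
#   max_overlap, orf_id = get_orf_hits(ribotricer_index, "chr1", range(150, 250))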
def coordlist_to_length(coordlist):
"Given a list of coordinates get the length of the region" ""
all_coords = dict()
for coords in coordlist:
for coord in coords.split(","):
start, stop = coord.split("-")
start = int(start)
stop = int(stop)
for x in range(start, stop + 1):
if x not in all_coords:
all_coords[x] = 0
return len(all_coords.keys())
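# --- Editor's illustrative sketch (not part of the original source) ---
# Two overlapping intervals "1-10" and "5-20" cover positions 1..20, so the
# collapsed region length is 20.
def _example_coordlist_to_length():
    return coordlist_to_length(["1-10,5-20"])  # -> 20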
def ribotricer_index_to_gene_length(indexfile, outfile):
"""Collapse index to gene level lengths for different ORF types"""
df = | pd.read_csv(indexfile, sep="\t", usecols=["gene_id", "ORF_type", "coordinate"]) | pandas.read_csv |
import pandas as pd
import matplotlib.pyplot as plt
def plot_feature_importances(rf, cols, model_dir='.'):
importances = | pd.DataFrame() | pandas.DataFrame |
import numpy as np
import cv2
import csv
import os
import pandas as pd
import time
def calcuNearestPtsDis2(ptList1):
''' Find the nearest point of each point in ptList1 & return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if len(ptList1)<=1:
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
ptList2 = np.delete(ptList1,i,axis=0)
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
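# --- Editor's illustrative sketch (not part of the original source) ---
# For three collinear points the nearest-neighbour distances are 1, 1 and 2,
# so the mean minimum distance returned is (1 + 1 + 2) / 3.
def _example_calcuNearestPtsDis2():
    pts = np.array([[0, 0], [1, 0], [3, 0]])
    return calcuNearestPtsDis2(pts)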
def calcuNearestPtsDis(ptList1, ptList2):
''' Find the nearest point of each point in ptList1 from ptList2
& return the mean min_distance
Parameters
----------
ptList1: numpy array
points' array, shape:(x,2)
ptList2: numpy array
points' array, shape:(x,2)
Return
----------
mean_Dis: float
the mean value of the minimum distances
'''
if (not len(ptList2)) or (not len(ptList1)):
print('error!')
return 'error'
minDis_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1).astype(np.float32) )
minDis = disMat.min()
minDis_list.append(minDis)
minDisArr = np.array(minDis_list)
mean_Dis = np.mean(minDisArr)
return mean_Dis
def calcuNearestPts(csvName1, csvName2):
ptList1_csv = pd.read_csv(csvName1,usecols=['x_cord', 'y_cord'])
ptList2_csv = pd.read_csv(csvName2,usecols=['x_cord', 'y_cord'])
ptList1 = ptList1_csv.values[:,:2]
ptList2 = ptList2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,0:2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
ptList1_csv = pd.concat([ptList1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
ptList1_csv.to_csv(csvName1,index=False)
return minDisInd
def drawDisPic(picInd):
picName = 'patients_dataset/image/'+ picInd +'.png'
img = cv2.imread(picName)
csvName1='patients_dataset/data_csv/'+picInd+'other_tumour_pts.csv'
csvName2='patients_dataset/data_csv/'+picInd+'other_lymph_pts.csv'
ptList1_csv = pd.read_csv(csvName1)
ptList2_csv = pd.read_csv(csvName2)
ptList1 = ptList1_csv.values
ptList2 = ptList2_csv.values
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 3 , (0, 0, 255), -1 )
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), (0,255,0), 1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 3 , (255, 0, 0), -1 )
cv2.imwrite( picInd+'_dis.png',img)
def drawDistancePic(disName1, disName2, picID):
''' Draw & save the distance pics
Parameters
----------
disName1,disName2: str
such as 'positive_lymph', 'all_tumour'
picID: str
the patient's ID
'''
cellName_color = {'other_lymph': (255, 0, 0), 'positive_lymph': (255, 255, 0),
'other_tumour': (0, 0, 255), 'positive_tumour': (0, 255, 0)}
ptline_color = {'positive_lymph': (0,0,255), 'positive_tumour': (0,0,255),
'ptumour_plymph': (51, 97, 235), 'other_tumour': (0, 255, 0)}
if (disName1 == 'all_tumour' and disName2 == 'all_lymph') or (disName1 == 'all_tumour' and disName2 == 'positive_lymph'):
line_color = (0,255,255)
elif disName1 == 'positive_tumour' and disName2 == 'positive_lymph':
line_color = (51, 97, 235)
else:
line_color = ptline_color[disName1]
csv_dir = '/data/Datasets/MediImgExp/data_csv'
img_dir = '/data/Datasets/MediImgExp/image'
if disName1 == 'all_tumour' and disName2 == 'positive_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
# positive tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
# other tumour: find the nearest lymph cell
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList3)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList3[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList3[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == 'all_tumour' and disName2 == 'all_lymph':
dis1_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + 'other_tumour' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis3_csv = pd.read_csv(csv_dir + '/' + picID + 'positive_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis4_csv = pd.read_csv(csv_dir + '/' + picID + 'other_lymph' + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
ptList3 = dis3_csv.values[:,:2]
ptList4 = dis4_csv.values[:,:2]
ptList6 = np.concatenate((ptList3, ptList4), axis=0)
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
minDisInd_list = []
for i in range(len(ptList2)):
currentPt = ptList2[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList6)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis2_csv = pd.concat([dis2_csv, pd.DataFrame(columns=['nearestInd'], data=minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList6[ptList1[i, 2],:2]), line_color, 1)
ptList2 = dis2_csv.values
for i in range(len(ptList2)):
img = cv2.line(img, tuple(ptList2[i,:2]), tuple(ptList6[ptList2[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 4, (0, 255, 0), -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 4, (0, 0, 255), -1)
for i in range(len(ptList3)):
img = cv2.circle(img, tuple(ptList3[i,:2]), 4, (255, 255, 0), -1)
for i in range(len(ptList4)):
img = cv2.circle(img, tuple(ptList4[i,:2]), 4, (255, 0, 0), -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 != disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
dis2_csv = pd.read_csv(csv_dir + '/' + picID + disName2 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
ptList2 = dis2_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i,:]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList2)**2, axis=1))
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]) , tuple(ptList2[ ptList1[i,2] ,:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
for i in range(len(ptList2)):
img = cv2.circle(img, tuple(ptList2[i,:2]), 5, cellName_color[disName2], -1)
cv2.imwrite(picID + disName1 + '_' + disName2 + '_dis.png', img)
elif disName1 == disName2:
dis1_csv = pd.read_csv(csv_dir + '/' + picID + disName1 + '_pts.csv', usecols=['x_cord', 'y_cord'])
ptList1 = dis1_csv.values[:,:2]
minDisInd_list = []
for i in range(len(ptList1)):
currentPt = ptList1[i, :2]
disMat = np.sqrt(np.sum(np.asarray(currentPt - ptList1)** 2, axis=1).astype(np.float32))
minDisInd = np.argmin(disMat)
disMat[minDisInd] = 1000.0
minDisInd = np.argmin(disMat)
minDisInd_list.append(minDisInd)
minDisInd = np.array(minDisInd_list).reshape(-1,1)
dis1_csv = pd.concat([dis1_csv, pd.DataFrame( columns=['nearestInd'],data = minDisInd)], axis=1)
img = cv2.imread(img_dir + '/' + picID + '.jpg')
img[:,:, 0] = 255
img[:,:, 1] = 255
img[:,:, 2] = 255
ptList1 = dis1_csv.values
for i in range(len(ptList1)):
img = cv2.line(img, tuple(ptList1[i,:2]), tuple(ptList1[ptList1[i, 2],:2]), line_color, 1)
for i in range(len(ptList1)):
img = cv2.circle(img, tuple(ptList1[i,:2]), 5, cellName_color[disName1], -1)
cv2.imwrite(picID + disName1 + '_dis.png', img)
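# --- Editor's illustrative sketch (not part of the original source) ---
# Example call with placeholder arguments: '123' stands for a patient ID whose
# csv and image files are expected under the hard-coded /data/Datasets/MediImgExp
# folders used above, and the group names follow the keys of cellName_color.
def _example_drawDistancePic():
    drawDistancePic('positive_tumour', 'positive_lymph', '123')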
def getAllPicsDisCSV():
'''
Get all distance data from the saved csv files (get from the above functions)
'''
base_dir = '/data/Datasets/MediImgExp'
f = open( base_dir + '/' + 'AllDisData.csv','w',encoding='utf-8',newline="")
csv_writer = csv.writer(f)
csv_writer.writerow([ 'Ind','PosiTumourRatio','PosiLymphRatio',
'DisTumourLymph','DisPosiTumour','DisPosiLymph',
'DisPosiTumourPosiLymph','DisTumourPosiLymph'])
process_dir = base_dir + '/process'
csv_dir = base_dir + '/data_csv'
pic_name = os.listdir(process_dir)
picIDList = []
for pic_name_ in pic_name:
picIDList.append( pic_name_.split('_')[0] )
for picID in picIDList:
list_data = []
list_data.append(picID)
# PosiTumourRatio
PosiTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_tumour_pts.csv')
OtherTumourCsv = pd.read_csv( csv_dir+'/'+ picID +'other_tumour_pts.csv')
Num_PosiTumour = PosiTumourCsv.shape[0]
Num_OtherTumour = OtherTumourCsv.shape[0]
if (Num_PosiTumour + Num_OtherTumour)!=0 :
PosiTumourRatio = Num_PosiTumour / (Num_PosiTumour + Num_OtherTumour)
else:
PosiTumourRatio = 'error'
list_data.append(PosiTumourRatio)
# PosiLymphRatio
PosiLymphCsv = pd.read_csv( csv_dir+'/'+ picID +'positive_lymph_pts.csv')
OtherLymphCsv = | pd.read_csv( csv_dir+'/'+ picID +'other_lymph_pts.csv') | pandas.read_csv |
import itertools
import pandas
import logging
import requests
import json
from datetime import datetime
_BASE_CRYPTO_COMPARE_URL = 'https://min-api.cryptocompare.com/data'
def load_crypto_compare_data(currencies, reference_currencies, exchange, time_scale):
"""
:param currencies: list of currency pairs to retrieve
:param reference_currencies: for quoting each currency in terms of reference currencies
:param exchange: exchange as referenced by CryptoCompare
:param time_scale: one of ('day', 'hour', 'minute', 'spot')
:return: DataFrame of historical prices
"""
cross_product = itertools.product(set(reference_currencies).union(set(currencies)), reference_currencies)
pairs = [pair for pair in cross_product if pair[0] != pair[1]]
if time_scale == 'spot':
prices = load_pairs_spot(pairs, exchange)
elif time_scale == 'minute':
prices = load_pairs_histo_minute(pairs, exchange)
elif time_scale == 'hour':
prices = load_pairs_histo_hourly(pairs, exchange)
elif time_scale == 'day':
prices = load_pairs_histo_daily(pairs, exchange)
else:
raise NotImplementedError('unavailable time scale: "{}"'.format(time_scale))
return prices.pivot_table(index='date', columns='currency', values='price').reset_index()
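# --- Editor's illustrative sketch (not part of the original source) ---
# Example call with placeholder arguments; 'Coinbase' is only one possible
# exchange label recognised by CryptoCompare, and the call performs live HTTP
# requests, so it needs network access.
def _example_load_crypto_compare_data():
    return load_crypto_compare_data(
        currencies=['BTC', 'ETH'],
        reference_currencies=['USD'],
        exchange='Coinbase',
        time_scale='hour')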
def load_pairs_spot(pairs, exchange):
"""
:param pairs:
:param exchange: exchange as referenced by CryptoCompare
:return:
"""
logging.debug('loading spot data for pairs: {}'.format(str(pairs)))
session = requests.Session()
spot_prices = dict()
for from_currency, to_currency in pairs:
payload = {'fsym': from_currency,
'tsyms': to_currency,
'e': exchange
}
results = session.get('{}/price'.format(_BASE_CRYPTO_COMPARE_URL), params=payload)
json_result = results.json()
if 'Message' in json_result:
json_message = json_result['Message']
message = 'Error occurred while loading prices from exchange {}: {} ({}/{})'
raise Exception(message.format(exchange, json_message, from_currency, to_currency))
spot_prices[(from_currency, to_currency)] = json_result[to_currency]
now = datetime.now()
timestamps = list()
currencies = list()
prices = list()
for source, target in spot_prices:
timestamps.append(now)
currencies.append('{}/{}'.format(source, target))
prices.append(spot_prices[(source, target)])
spot_df = pandas.DataFrame({'date': timestamps, 'currency': currencies, 'price': prices})
return spot_df
def load_pairs_histo_minute(pairs, exchange):
"""
:param pairs:
:param exchange: exchange as referenced by CryptoCompare
:return:
"""
logging.debug('loading minute data for pairs: {}'.format(str(pairs)))
session = requests.Session()
output = dict()
for from_currency, to_currency in pairs:
payload = {'fsym': from_currency,
'tsym': to_currency,
'e': exchange,
'limit': 1000
}
results = session.get('{}/histominute'.format(_BASE_CRYPTO_COMPARE_URL), params=payload)
output[(from_currency, to_currency)] = json.loads(results.text)['Data']
output = {
key: [
{'time': datetime.fromtimestamp(price_data['time']), 'close': price_data['close']}
for price_data in values] for key, values in output.items()
}
data = list()
for source, target in output:
for hist_data in output[(source, target)]:
entry = dict()
entry['currency'] = '{}/{}'.format(source, target)
entry['date'] = hist_data['time']
entry['price'] = hist_data['close']
data.append(entry)
minute_df = pandas.DataFrame(data)
return minute_df
def load_pairs_histo_hourly(pairs, exchange):
"""
:param pairs:
:param exchange: exchange as referenced by CryptoCompare
:return:
"""
logging.debug('loading hourly data for pairs: {}'.format(str(pairs)))
session = requests.Session()
output = dict()
for from_currency, to_currency in pairs:
payload = {'fsym': from_currency,
'tsym': to_currency,
'e': exchange,
'limit': 1000
}
results = session.get('{}/histohour'.format(_BASE_CRYPTO_COMPARE_URL), params=payload)
output[(from_currency, to_currency)] = json.loads(results.text)['Data']
output = {
key: [
{'time': datetime.fromtimestamp(price_data['time']), 'close': price_data['close']}
for price_data in values] for key, values in output.items()
}
data = list()
for source, target in output:
for hist_data in output[(source, target)]:
entry = dict()
entry['currency'] = '{}/{}'.format(source, target)
entry['date'] = hist_data['time']
entry['price'] = hist_data['close']
data.append(entry)
hourly_df = | pandas.DataFrame(data) | pandas.DataFrame |
#!/usr/bin/env python
from __future__ import print_function
import sys
import scipy
from numpy import *
from scipy import stats
import pandas as pd
import matplotlib.pyplot as plt
import glob
import re
import networkx as nx
from itertools import combinations, product
from scipy.interpolate import interp1d
import argparse
import logging
#logging setup
console = logging.StreamHandler(sys.stdout)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console.setFormatter(formatter)
LOG = logging.getLogger("Align RT of spectral libraries")
LOG.addHandler(console)
LOG.setLevel(logging.INFO)
def align_libs(reference, other, rsq_threshold):
# read first library, groupby modified sequence and charge and store RTs
df = reference
df_rt = df.groupby(['ModifiedPeptideSequence', 'PrecursorCharge'])['NormalizedRetentionTime'].apply(
median).reset_index()
df_rt['Key'] = df_rt['ModifiedPeptideSequence'] + df_rt['PrecursorCharge'].apply(str)
df_rt = df_rt[['Key', 'NormalizedRetentionTime']]
# read second library, groupby modified sequence and charge and store RTs
df_II = other
df_rt_II = df_II.groupby(['ModifiedPeptideSequence', 'PrecursorCharge'])['NormalizedRetentionTime'].apply(
median).reset_index()
df_rt_II['Key'] = df_rt_II['ModifiedPeptideSequence'] + df_rt_II['PrecursorCharge'].apply(str)
df_rt_II = df_rt_II[['Key', 'NormalizedRetentionTime']]
# calibrate rt alignment
df_rt_merged = pd.merge(df_rt, df_rt_II, on='Key')
slope, intercept, r_value, p_value, std_err = stats.linregress(df_rt_merged['NormalizedRetentionTime_x'], df_rt_merged['NormalizedRetentionTime_y'])
(a, b) = (slope, intercept)
rsq = r_value ** 2
if rsq < rsq_threshold:
raise Exception("Error: R-squared " + str(rsq) + " is below the threshold of " + str(rsq_threshold) + ".")
# apply rt transformation
for i, row in df_II.iterrows():
rt = row["NormalizedRetentionTime"]
new_rt = scipy.polyval([a, b], rt)
df_II.loc[i, "NormalizedRetentionTime"] = new_rt
return df_II
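# --- Editor's illustrative sketch (not part of the original source) ---
# Example call with placeholder file names; both TSVs are assumed to carry the
# columns used above (ModifiedPeptideSequence, PrecursorCharge,
# NormalizedRetentionTime), as in OpenSWATH-style spectral libraries.
def _example_align_libs():
    ref = pd.read_csv('library_reference.tsv', sep='\t')
    other = pd.read_csv('library_other.tsv', sep='\t')
    return align_libs(ref, other, rsq_threshold=0.95)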
def compute_MST(libs):
file_combs = combinations(libs, 2)
G=nx.Graph()
for file_comb in file_combs:
lib_I=pd.read_csv(file_comb[0], sep='\t') #.split('/')[-1].split('_Class')[0][3:]
lib_II= | pd.read_csv(file_comb[1], sep='\t') | pandas.read_csv |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import pandas as pd
import json
from matplotlib import pyplot as plt
import numpy as np
# Configuration
anomaly_color = 'sandybrown'
prediction_color = 'yellowgreen'
training_color = 'yellowgreen'
validation_color = 'gold'
test_color = 'coral'
figsize=(12, 4)
def load_series(file_name, data_folder):
# Load the input data
data_path = f'{data_folder}/data/{file_name}'
data = pd.read_csv(data_path)
data['timestamp'] = pd.to_datetime(data['timestamp'])
data.set_index('timestamp', inplace=True)
# Load the labels
label_path = f'{data_folder}/labels/combined_labels.json'
with open(label_path) as fp:
labels = pd.Series(json.load(fp)[file_name])
labels = | pd.to_datetime(labels) | pandas.to_datetime |
import re
import numpy as np
import pytest
import pandas as pd
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
class TestSeriesReplace:
def test_replace_explicit_none(self):
# GH#36984 if the user explicitly passes value=None, give it to them
ser = pd.Series([0, 0, ""], dtype=object)
result = ser.replace("", None)
expected = pd.Series([0, 0, None], dtype=object)
tm.assert_series_equal(result, expected)
df = pd.DataFrame(np.zeros((3, 3)))
df.iloc[2, 2] = ""
result = df.replace("", None)
expected = pd.DataFrame(
{
0: np.zeros(3),
1: np.zeros(3),
2: np.array([0.0, 0.0, None], dtype=object),
}
)
assert expected.iloc[2, 2] is None
tm.assert_frame_equal(result, expected)
# GH#19998 same thing with object dtype
ser = pd.Series([10, 20, 30, "a", "a", "b", "a"])
result = ser.replace("a", None)
expected = pd.Series([10, 20, 30, None, None, "b", None])
assert expected.iloc[-1] is None
tm.assert_series_equal(result, expected)
def test_replace_noop_doesnt_downcast(self):
# GH#44498
ser = pd.Series([None, None, pd.Timestamp("2021-12-16 17:31")], dtype=object)
res = ser.replace({np.nan: None}) # should be a no-op
tm.assert_series_equal(res, ser)
assert res.dtype == object
# same thing but different calling convention
res = ser.replace(np.nan, None)
tm.assert_series_equal(res, ser)
assert res.dtype == object
def test_replace(self):
N = 100
ser = pd.Series(np.random.randn(N))
ser[0:4] = np.nan
ser[6:10] = 0
# replace list with a single value
return_value = ser.replace([np.nan], -1, inplace=True)
assert return_value is None
exp = ser.fillna(-1)
tm.assert_series_equal(ser, exp)
rs = ser.replace(0.0, np.nan)
ser[ser == 0.0] = np.nan
tm.assert_series_equal(rs, ser)
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_nan_with_inf(self):
ser = pd.Series([np.nan, 0, np.inf])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
ser = pd.Series([np.nan, 0, "foo", "bar", np.inf, None, pd.NaT])
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
filled = ser.copy()
filled[4] = 0
tm.assert_series_equal(ser.replace(np.inf, 0), filled)
def test_replace_listlike_value_listlike_target(self, datetime_series):
ser = pd.Series(datetime_series.index)
tm.assert_series_equal(ser.replace(np.nan, 0), ser.fillna(0))
# malformed
msg = r"Replacement lists must match in length\. Expecting 3 got 2"
with pytest.raises(ValueError, match=msg):
ser.replace([1, 2, 3], [np.nan, 0])
# ser is dt64 so can't hold 1 or 2, so this replace is a no-op
result = ser.replace([1, 2], [np.nan, 0])
tm.assert_series_equal(result, ser)
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0])
tm.assert_series_equal(result, pd.Series([4, 3, 2, 1, 0]))
def test_replace_gh5319(self):
# API change from 0.12?
# GH 5319
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace([np.nan])
tm.assert_series_equal(result, expected)
ser = pd.Series([0, np.nan, 2, 3, 4])
expected = ser.ffill()
result = ser.replace(np.nan)
tm.assert_series_equal(result, expected)
def test_replace_datetime64(self):
# GH 5797
ser = pd.Series(pd.date_range("20130101", periods=5))
expected = ser.copy()
expected.loc[2] = pd.Timestamp("20120101")
result = ser.replace({pd.Timestamp("20130103"): pd.Timestamp("20120101")})
tm.assert_series_equal(result, expected)
result = ser.replace(pd.Timestamp("20130103"), pd.Timestamp("20120101"))
tm.assert_series_equal(result, expected)
def test_replace_nat_with_tz(self):
# GH 11792: Test with replacing NaT in a list with tz data
ts = pd.Timestamp("2015/01/01", tz="UTC")
s = pd.Series([pd.NaT, pd.Timestamp("2015/01/01", tz="UTC")])
result = s.replace([np.nan, pd.NaT], pd.Timestamp.min)
expected = pd.Series([pd.Timestamp.min, ts], dtype=object)
tm.assert_series_equal(expected, result)
def test_replace_timedelta_td64(self):
tdi = pd.timedelta_range(0, periods=5)
ser = pd.Series(tdi)
# Using a single dict argument means we go through replace_list
result = ser.replace({ser[1]: ser[3]})
expected = pd.Series([ser[0], ser[3], ser[2], ser[3], ser[4]])
tm.assert_series_equal(result, expected)
def test_replace_with_single_list(self):
ser = pd.Series([0, 1, 2, 3, 4])
result = ser.replace([1, 2, 3])
tm.assert_series_equal(result, pd.Series([0, 0, 0, 0, 4]))
s = ser.copy()
return_value = s.replace([1, 2, 3], inplace=True)
assert return_value is None
tm.assert_series_equal(s, pd.Series([0, 0, 0, 0, 4]))
# make sure things don't get corrupted when fillna call fails
s = ser.copy()
msg = (
r"Invalid fill method\. Expecting pad \(ffill\) or backfill "
r"\(bfill\)\. Got crash_cymbal"
)
with pytest.raises(ValueError, match=msg):
return_value = s.replace([1, 2, 3], inplace=True, method="crash_cymbal")
assert return_value is None
tm.assert_series_equal(s, ser)
def test_replace_mixed_types(self):
ser = pd.Series(np.arange(5), dtype="int64")
def check_replace(to_rep, val, expected):
sc = ser.copy()
result = ser.replace(to_rep, val)
return_value = sc.replace(to_rep, val, inplace=True)
assert return_value is None
tm.assert_series_equal(expected, result)
tm.assert_series_equal(expected, sc)
# 3.0 can still be held in our int64 series, so we do not upcast GH#44940
tr, v = [3], [3.0]
check_replace(tr, v, ser)
# Note this matches what we get with the scalars 3 and 3.0
check_replace(tr[0], v[0], ser)
# MUST upcast to float
e = pd.Series([0, 1, 2, 3.5, 4])
tr, v = [3], [3.5]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, "a"])
tr, v = [3, 4], [3.5, "a"]
check_replace(tr, v, e)
# again casts to object
e = pd.Series([0, 1, 2, 3.5, pd.Timestamp("20130101")])
tr, v = [3, 4], [3.5, pd.Timestamp("20130101")]
check_replace(tr, v, e)
# casts to object
e = pd.Series([0, 1, 2, 3.5, True], dtype="object")
tr, v = [3, 4], [3.5, True]
check_replace(tr, v, e)
# test an object with dates + floats + integers + strings
dr = pd.Series(pd.date_range("1/1/2001", "1/10/2001", freq="D"))
result = dr.astype(object).replace([dr[0], dr[1], dr[2]], [1.0, 2, "a"])
expected = pd.Series([1.0, 2, "a"] + dr[3:].tolist(), dtype=object)
tm.assert_series_equal(result, expected)
def test_replace_bool_with_string_no_op(self):
s = pd.Series([True, False, True])
result = s.replace("fun", "in-the-sun")
tm.assert_series_equal(s, result)
def test_replace_bool_with_string(self):
# nonexistent elements
s = pd.Series([True, False, True])
result = s.replace(True, "2u")
expected = pd.Series(["2u", False, "2u"])
tm.assert_series_equal(expected, result)
def test_replace_bool_with_bool(self):
s = pd.Series([True, False, True])
result = s.replace(True, False)
expected = pd.Series([False] * len(s))
tm.assert_series_equal(expected, result)
def test_replace_with_dict_with_bool_keys(self):
s = pd.Series([True, False, True])
result = s.replace({"asdf": "asdb", True: "yes"})
expected = pd.Series(["yes", False, "yes"])
tm.assert_series_equal(result, expected)
def test_replace_Int_with_na(self, any_int_ea_dtype):
# GH 38267
result = pd.Series([0, None], dtype=any_int_ea_dtype).replace(0, pd.NA)
expected = pd.Series([pd.NA, pd.NA], dtype=any_int_ea_dtype)
tm.assert_series_equal(result, expected)
result = pd.Series([0, 1], dtype=any_int_ea_dtype).replace(0, pd.NA)
result.replace(1, pd.NA, inplace=True)
tm.assert_series_equal(result, expected)
def test_replace2(self):
N = 100
ser = pd.Series(np.fabs(np.random.randn(N)), tm.makeDateIndex(N), dtype=object)
ser[:5] = np.nan
ser[6:10] = "foo"
ser[20:30] = "bar"
# replace list with a single value
rs = ser.replace([np.nan, "foo", "bar"], -1)
assert (rs[:5] == -1).all()
assert (rs[6:10] == -1).all()
assert (rs[20:30] == -1).all()
assert (pd.isna(ser[:5])).all()
# replace with different values
rs = ser.replace({np.nan: -1, "foo": -2, "bar": -3})
assert (rs[:5] == -1).all()
assert (rs[6:10] == -2).all()
assert (rs[20:30] == -3).all()
assert (pd.isna(ser[:5])).all()
# replace with different values with 2 lists
rs2 = ser.replace([np.nan, "foo", "bar"], [-1, -2, -3])
tm.assert_series_equal(rs, rs2)
# replace inplace
return_value = ser.replace([np.nan, "foo", "bar"], -1, inplace=True)
assert return_value is None
assert (ser[:5] == -1).all()
assert (ser[6:10] == -1).all()
assert (ser[20:30] == -1).all()
def test_replace_with_dictlike_and_string_dtype(self, nullable_string_dtype):
# GH 32621, GH#44940
ser = pd.Series(["one", "two", np.nan], dtype=nullable_string_dtype)
expected = pd.Series(["1", "2", np.nan], dtype=nullable_string_dtype)
result = ser.replace({"one": "1", "two": "2"})
tm.assert_series_equal(expected, result)
def test_replace_with_empty_dictlike(self):
# GH 15289
s = pd.Series(list("abcd"))
tm.assert_series_equal(s, s.replace({}))
with | tm.assert_produces_warning(FutureWarning) | pandas._testing.assert_produces_warning |
# -*- coding: utf-8 -*-
"""
This file is part of the Shotgun Lipidomics Assistant (SLA) project.
Copyright 2020 <NAME> (UCLA), <NAME> (UCLA), <NAME> (UW).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# import numpy as np
import pandas as pd
from pyopenms import *
import os
# import glob
# import re
import matplotlib
import matplotlib.pyplot as plt
# from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
# from matplotlib.figure import Figure
import seaborn as sns
# from scipy import stats
from tkinter import *
# import tkinter as tk
from tkinter import ttk
# from tkinter import messagebox
# from tkinter.messagebox import showinfo
from tkinter import filedialog
import datetime
def get_tune1(tunef1):
tunef1.configure(state="normal")
tunef1.delete(1.0, END)
filedir = filedialog.askopenfilename(filetypes=(("mzML Files", "*.mzML"), ("all files", "*.*")))
tunef1.insert(INSERT, filedir)
tunef1.configure(state="disabled")
def get_tune2(tunef2):
# setdir = filedialog.askdirectory()
tunef2.configure(state="normal")
tunef2.delete(1.0, END)
filedir = filedialog.askopenfilename(filetypes=(("mzML Files", "*.mzML"), ("all files", "*.*")))
tunef2.insert(INSERT, filedir)
tunef2.configure(state="disabled")
def imp_tunekey(maploc_tune):
# map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"),("all files", "*.*")))
maploc_tune.configure(state="normal")
maploc_tune.delete(1.0, END)
map1 = filedialog.askopenfilename(filetypes=(("excel Files", "*.xlsx"), ("all files", "*.*")))
maploc_tune.insert(INSERT, map1)
maploc_tune.configure(state="disabled")
def exportdata(covlist, covlist_dict, maploc_tune):
"""export result to excel file"""
exp_temp_loc = maploc_tune.get('1.0', 'end-1c') # 'Tuning_spname_dict'
neg_temp = pd.read_excel(exp_temp_loc, sheet_name='NEG', header=0, index_col=None, na_values='.')
pos_temp = pd.read_excel(exp_temp_loc, sheet_name='POS', header=0, index_col=None, na_values='.')
sst_temp = pd.read_excel(exp_temp_loc, sheet_name='SST', header=0, index_col=None, na_values='.')
covlist_neg = list(neg_temp['Group'].unique()) # + list(pos_temp['Group'].unique())
covlist_pos = list(pos_temp['Group'].unique())
# fill in
for i in range(0, len(covlist_neg)):
# tvar = covlist_dict[i].get('1.0', 'end-1c')
tvar = covlist_dict[i].get()
neg_temp.loc[neg_temp['Group'] == covlist_neg[i], 'volt'] = float(tvar)
# messagebox.showinfo(tvar)
# auto compute when any LPC/LPE entered 0
try:
if any(neg_temp.loc[neg_temp['Group'] == 'LPC16', 'volt'] == 0):
neg_temp.loc[neg_temp['Group'] == 'LPC16', 'volt'] = \
neg_temp.loc[neg_temp['Group'] == 'LPC18', 'volt'].iloc[0] - 1
elif any(neg_temp.loc[neg_temp['Group'] == 'LPC18', 'volt'] == 0):
neg_temp.loc[neg_temp['Group'] == 'LPC18', 'volt'] = \
neg_temp.loc[neg_temp['Group'] == 'LPC16', 'volt'].iloc[0] + 1
except Exception:
pass
try:
if any(neg_temp.loc[neg_temp['Group'] == 'LPE16', 'volt'] == 0):
neg_temp.loc[neg_temp['Group'] == 'LPE16', 'volt'] = \
neg_temp.loc[neg_temp['Group'] == 'LPE18', 'volt'].iloc[0] - 1.5
elif any(neg_temp.loc[neg_temp['Group'] == 'LPE18', 'volt'] == 0):
neg_temp.loc[neg_temp['Group'] == 'LPE18', 'volt'] = \
neg_temp.loc[neg_temp['Group'] == 'LPE16', 'volt'].iloc[0] + 1.5
except Exception:
pass
for i in range(0, len(covlist_pos)):
# tvar = covlist_dict[i+len(covlist_neg)].get('1.0', 'end-1c')
tvar = covlist_dict[i + len(covlist_neg)].get()
pos_temp.loc[pos_temp['Group'] == covlist_pos[i], 'volt'] = float(tvar)
# SST result
for i in sst_temp['Group'].unique():
sst_temp.loc[sst_temp['Group'] == i, 'volt'] = float(covlist_dict[covlist.index(i)].get())
# save
fname = 'TuneResult_' + datetime.datetime.now().strftime("%Y%m%d%H%M") + '.xlsx'
master = pd.ExcelWriter(fname)
neg_temp.to_excel(master, 'NEG', index=True)
pos_temp.to_excel(master, 'POS', index=True)
sst_temp.to_excel(master, 'SST', index=True)
master.save()
os.startfile(fname)
# os.system(fname)
# messagebox.showinfo("Information","Done")
def fetchLPC(covlist_dict):
"""
not in use. originally designed to auto update LPC peak
"""
covlist_dict[3].configure(state=NORMAL)
covlist_dict[3].delete(0, END)
try:
covlist_dict[3].insert(0, float(covlist_dict[2].get()) + 1)
covlist_dict[3].configure(state=DISABLED)
except Exception:
covlist_dict[3].insert(0, float(0))
covlist_dict[3].configure(state=DISABLED)
def fetchLPE(covlist_dict):
"""
not in use. originally designed to auto update LPE peak
"""
covlist_dict[4].configure(state=NORMAL)
covlist_dict[4].delete(0, END)
try:
covlist_dict[4].insert(0, float(covlist_dict[5].get()) - 1.5)
covlist_dict[4].configure(state=DISABLED)
except Exception:
covlist_dict[4].insert(0, float(0))
covlist_dict[4].configure(state=DISABLED)
def TuningFun(tunef1, tunef2, maploc_tune, out_text, variable_peaktype, tab1, covlist, covlist_label, covlist_dict):
global bmid, tvar, bvar, idc5, idc9
out_text.configure(state="normal")
sp_dict1_loc = maploc_tune.get('1.0', 'end-1c')
# std_dict_loc =
# 'C:/Users/baolongsu/Desktop/Projects/StdUnkRatio/standard_dict - LipidizerSimulate_102b_MW_dev2.xlsx'
file = tunef1.get('1.0', 'end-1c')
os.chdir(file[0:file.rfind('/')])
##get name all files
list_of_files = [tunef1.get('1.0', 'end-1c'),
tunef2.get('1.0', 'end-1c')]
def spName(row):
return (sp_dict[method].loc[
(pd.Series(sp_dict[method]['Q1'] == row['Q1']) & pd.Series(sp_dict[method]['Q3'] == row['Q3']))].index[0])
##create all variable
# all_df_dict = {'1': {}, '2': {}}
# out_df2 = pd.DataFrame()
# out_df2 = {'1': pd.DataFrame(), '2': pd.DataFrame()}
# out_df2_con = pd.DataFrame()
# out_df2_con = {'1': pd.DataFrame(), '2': pd.DataFrame()}
# out_df2_intensity = {'1': pd.DataFrame(), '2': pd.DataFrame()}
sp_df3 = {'A': 0}
##read dicts
sp_dict = {'1': pd.read_excel(sp_dict1_loc, sheet_name='1', header=0, index_col=-1, na_values='.'),
'2': pd.read_excel(sp_dict1_loc, sheet_name='2', header=0, index_col=-1, na_values='.'),
'NEG': pd.read_excel(sp_dict1_loc, sheet_name='NEG', header=0, index_col=None, na_values='.'),
'POS': | pd.read_excel(sp_dict1_loc, sheet_name='POS', header=0, index_col=None, na_values='.') | pandas.read_excel |
from datetime import datetime, timedelta
import warnings
import operator
from textwrap import dedent
import numpy as np
from pandas._libs import (lib, index as libindex, tslib as libts,
algos as libalgos, join as libjoin,
Timedelta)
from pandas._libs.lib import is_datetime_array
from pandas.compat import range, u, set_function_name
from pandas.compat.numpy import function as nv
from pandas import compat
from pandas.core.accessor import CachedAccessor
from pandas.core.arrays import ExtensionArray
from pandas.core.dtypes.generic import (
ABCSeries, ABCDataFrame,
ABCMultiIndex,
ABCPeriodIndex, ABCTimedeltaIndex,
ABCDateOffset)
from pandas.core.dtypes.missing import isna, array_equivalent
from pandas.core.dtypes.common import (
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_platform_int,
is_integer,
is_float,
is_dtype_equal,
is_dtype_union_equal,
is_object_dtype,
is_categorical,
is_categorical_dtype,
is_interval_dtype,
is_period_dtype,
is_bool,
is_bool_dtype,
is_signed_integer_dtype,
is_unsigned_integer_dtype,
is_integer_dtype, is_float_dtype,
is_datetime64_any_dtype,
is_datetime64tz_dtype,
is_timedelta64_dtype,
is_hashable,
needs_i8_conversion,
is_iterator, is_list_like,
is_scalar)
from pandas.core.base import PandasObject, IndexOpsMixin
import pandas.core.common as com
from pandas.core import ops
from pandas.util._decorators import (
Appender, Substitution, cache_readonly, deprecate_kwarg)
from pandas.core.indexes.frozen import FrozenList
import pandas.core.dtypes.concat as _concat
import pandas.core.missing as missing
import pandas.core.algorithms as algos
import pandas.core.sorting as sorting
from pandas.io.formats.printing import (
pprint_thing, default_pprint, format_object_summary, format_object_attrs)
from pandas.core.ops import make_invalid_op
from pandas.core.strings import StringMethods
__all__ = ['Index']
_unsortable_types = frozenset(('mixed', 'mixed-integer'))
_index_doc_kwargs = dict(klass='Index', inplace='',
target_klass='Index',
unique='Index', duplicated='np.ndarray')
_index_shared_docs = dict()
def _try_get_item(x):
try:
return x.item()
except AttributeError:
return x
def _make_comparison_op(op, cls):
def cmp_method(self, other):
if isinstance(other, (np.ndarray, Index, ABCSeries)):
if other.ndim > 0 and len(self) != len(other):
raise ValueError('Lengths must match to compare')
# we may need to directly compare underlying
# representations
if needs_i8_conversion(self) and needs_i8_conversion(other):
return self._evaluate_compare(other, op)
if is_object_dtype(self) and self.nlevels == 1:
# don't pass MultiIndex
with np.errstate(all='ignore'):
result = ops._comp_method_OBJECT_ARRAY(op, self.values, other)
else:
# numpy will show a DeprecationWarning on invalid elementwise
# comparisons, this will raise in the future
with warnings.catch_warnings(record=True):
with np.errstate(all='ignore'):
result = op(self.values, np.asarray(other))
# technically we could support bool dtyped Index
# for now just return the indexing array directly
if is_bool_dtype(result):
return result
try:
return Index(result)
except TypeError:
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(cmp_method, name, cls)
def _make_arithmetic_op(op, cls):
def index_arithmetic_method(self, other):
if isinstance(other, (ABCSeries, ABCDataFrame)):
return NotImplemented
elif isinstance(other, ABCTimedeltaIndex):
# Defer to subclass implementation
return NotImplemented
other = self._validate_for_numeric_binop(other, op)
# handle time-based others
if isinstance(other, (ABCDateOffset, np.timedelta64, timedelta)):
return self._evaluate_with_timedelta_like(other, op)
elif isinstance(other, (datetime, np.datetime64)):
return self._evaluate_with_datetime_like(other, op)
values = self.values
with np.errstate(all='ignore'):
result = op(values, other)
result = missing.dispatch_missing(op, values, other, result)
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
if op is divmod:
result = (Index(result[0], **attrs), Index(result[1], **attrs))
else:
result = Index(result, **attrs)
return result
name = '__{name}__'.format(name=op.__name__)
# TODO: docstring?
return set_function_name(index_arithmetic_method, name, cls)
class InvalidIndexError(Exception):
pass
_o_dtype = np.dtype(object)
_Identity = object
def _new_Index(cls, d):
""" This is called upon unpickling, rather than the default which doesn't
have arguments and breaks __new__
"""
# required for backward compat, because PI can't be instantiated with
# ordinals through __new__ GH #13277
if issubclass(cls, ABCPeriodIndex):
from pandas.core.indexes.period import _new_PeriodIndex
return _new_PeriodIndex(cls, **d)
return cls.__new__(cls, **d)
class Index(IndexOpsMixin, PandasObject):
"""
Immutable ndarray implementing an ordered, sliceable set. The basic object
storing axis labels for all pandas objects
Parameters
----------
data : array-like (1-dimensional)
dtype : NumPy dtype (default: object)
If dtype is None, we find the dtype that best fits the data.
If an actual dtype is provided, we coerce to that dtype if it's safe.
Otherwise, an error will be raised.
copy : bool
Make a copy of input ndarray
name : object
Name to be stored in the index
tupleize_cols : bool (default: True)
When True, attempt to create a MultiIndex if possible
Notes
-----
An Index instance can **only** contain hashable objects
Examples
--------
>>> pd.Index([1, 2, 3])
Int64Index([1, 2, 3], dtype='int64')
>>> pd.Index(list('abc'))
Index(['a', 'b', 'c'], dtype='object')
See Also
---------
RangeIndex : Index implementing a monotonic integer range
CategoricalIndex : Index of :class:`Categorical` s.
MultiIndex : A multi-level, or hierarchical, Index
IntervalIndex : an Index of :class:`Interval` s.
DatetimeIndex, TimedeltaIndex, PeriodIndex
Int64Index, UInt64Index, Float64Index
"""
# To hand over control to subclasses
_join_precedence = 1
# Cython methods
_left_indexer_unique = libjoin.left_join_indexer_unique_object
_left_indexer = libjoin.left_join_indexer_object
_inner_indexer = libjoin.inner_join_indexer_object
_outer_indexer = libjoin.outer_join_indexer_object
_typ = 'index'
_data = None
_id = None
name = None
asi8 = None
_comparables = ['name']
_attributes = ['name']
_is_numeric_dtype = False
_can_hold_na = True
# would we like our indexing holder to defer to us
_defer_to_indexing = False
# prioritize current class for _shallow_copy_with_infer,
# used to infer integers as datetime-likes
_infer_as_myclass = False
_engine_type = libindex.ObjectEngine
_accessors = set(['str'])
str = CachedAccessor("str", StringMethods)
def __new__(cls, data=None, dtype=None, copy=False, name=None,
fastpath=False, tupleize_cols=True, **kwargs):
if name is None and hasattr(data, 'name'):
name = data.name
if fastpath:
return cls._simple_new(data, name)
from .range import RangeIndex
# range
if isinstance(data, RangeIndex):
return RangeIndex(start=data, copy=copy, dtype=dtype, name=name)
elif isinstance(data, range):
return RangeIndex.from_range(data, copy=copy, dtype=dtype,
name=name)
# categorical
if is_categorical_dtype(data) or is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(data, dtype=dtype, copy=copy, name=name,
**kwargs)
# interval
if is_interval_dtype(data) or is_interval_dtype(dtype):
from .interval import IntervalIndex
closed = kwargs.get('closed', None)
return IntervalIndex(data, dtype=dtype, name=name, copy=copy,
closed=closed)
# index-like
elif isinstance(data, (np.ndarray, Index, ABCSeries)):
if (is_datetime64_any_dtype(data) or
(dtype is not None and is_datetime64_any_dtype(dtype)) or
'tz' in kwargs):
from pandas.core.indexes.datetimes import DatetimeIndex
result = DatetimeIndex(data, copy=copy, name=name,
dtype=dtype, **kwargs)
if dtype is not None and is_dtype_equal(_o_dtype, dtype):
return Index(result.to_pydatetime(), dtype=_o_dtype)
else:
return result
elif (is_timedelta64_dtype(data) or
(dtype is not None and is_timedelta64_dtype(dtype))):
from pandas.core.indexes.timedeltas import TimedeltaIndex
result = TimedeltaIndex(data, copy=copy, name=name, **kwargs)
if dtype is not None and _o_dtype == dtype:
return Index(result.to_pytimedelta(), dtype=_o_dtype)
else:
return result
if dtype is not None:
try:
# we need to avoid having numpy coerce
# things that look like ints/floats to ints unless
# they are actually ints, e.g. '0' and 0.0
# should not be coerced
# GH 11836
if is_integer_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'integer':
try:
data = np.array(data, copy=copy, dtype=dtype)
except OverflowError:
# gh-15823: a more user-friendly error message
raise OverflowError(
"the elements provided in the data cannot "
"all be casted to the dtype {dtype}"
.format(dtype=dtype))
elif inferred in ['floating', 'mixed-integer-float']:
if isna(data).any():
raise ValueError('cannot convert float '
'NaN to integer')
# If we are actually all equal to integers,
# then coerce to integer.
try:
return cls._try_convert_to_int_index(
data, copy, name, dtype)
except ValueError:
pass
# Return an actual float index.
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype,
name=name)
elif inferred == 'string':
pass
else:
data = data.astype(dtype)
elif is_float_dtype(dtype):
inferred = lib.infer_dtype(data)
if inferred == 'string':
pass
else:
data = data.astype(dtype)
else:
data = np.array(data, dtype=dtype, copy=copy)
except (TypeError, ValueError) as e:
msg = str(e)
if 'cannot convert float' in msg:
raise
# maybe coerce to a sub-class
from pandas.core.indexes.period import (
PeriodIndex, IncompatibleFrequency)
if isinstance(data, PeriodIndex):
return PeriodIndex(data, copy=copy, name=name, **kwargs)
if is_signed_integer_dtype(data.dtype):
from .numeric import Int64Index
return Int64Index(data, copy=copy, dtype=dtype, name=name)
elif is_unsigned_integer_dtype(data.dtype):
from .numeric import UInt64Index
return UInt64Index(data, copy=copy, dtype=dtype, name=name)
elif is_float_dtype(data.dtype):
from .numeric import Float64Index
return Float64Index(data, copy=copy, dtype=dtype, name=name)
elif issubclass(data.dtype.type, np.bool) or is_bool_dtype(data):
subarr = data.astype('object')
else:
subarr = com._asarray_tuplesafe(data, dtype=object)
# _asarray_tuplesafe does not always copy underlying data,
# so need to make sure that this happens
if copy:
subarr = subarr.copy()
if dtype is None:
inferred = lib.infer_dtype(subarr)
if inferred == 'integer':
try:
return cls._try_convert_to_int_index(
subarr, copy, name, dtype)
except ValueError:
pass
return Index(subarr, copy=copy,
dtype=object, name=name)
elif inferred in ['floating', 'mixed-integer-float']:
from .numeric import Float64Index
return Float64Index(subarr, copy=copy, name=name)
elif inferred == 'interval':
from .interval import IntervalIndex
return IntervalIndex(subarr, name=name, copy=copy)
elif inferred == 'boolean':
# don't support boolean explicitly ATM
pass
elif inferred != 'string':
if inferred.startswith('datetime'):
if (lib.is_datetime_with_singletz_array(subarr) or
'tz' in kwargs):
# only when subarr has the same tz
from pandas.core.indexes.datetimes import (
DatetimeIndex)
try:
return DatetimeIndex(subarr, copy=copy,
name=name, **kwargs)
except libts.OutOfBoundsDatetime:
pass
elif inferred.startswith('timedelta'):
from pandas.core.indexes.timedeltas import (
TimedeltaIndex)
return TimedeltaIndex(subarr, copy=copy, name=name,
**kwargs)
elif inferred == 'period':
try:
return PeriodIndex(subarr, name=name, **kwargs)
except IncompatibleFrequency:
pass
return cls._simple_new(subarr, name)
elif hasattr(data, '__array__'):
return Index(np.asarray(data), dtype=dtype, copy=copy, name=name,
**kwargs)
elif data is None or is_scalar(data):
cls._scalar_data_error(data)
else:
if tupleize_cols and is_list_like(data) and data:
if is_iterator(data):
data = list(data)
# we must be all tuples, otherwise don't construct
# 10697
if all(isinstance(e, tuple) for e in data):
from .multi import MultiIndex
return MultiIndex.from_tuples(
data, names=name or kwargs.get('names'))
# other iterable of some kind
subarr = com._asarray_tuplesafe(data, dtype=object)
return Index(subarr, dtype=dtype, copy=copy, name=name, **kwargs)
"""
NOTE for new Index creation:
- _simple_new: It returns new Index with the same type as the caller.
All metadata (such as name) must be provided by caller's responsibility.
Using _shallow_copy is recommended because it fills these metadata
otherwise specified.
- _shallow_copy: It returns new Index with the same type (using
_simple_new), but fills caller's metadata otherwise specified. Passed
kwargs will overwrite corresponding metadata.
- _shallow_copy_with_infer: It returns new Index inferring its type
from passed values. It fills caller's metadata otherwise specified as the
same as _shallow_copy.
See each method's docstring.
"""
@classmethod
def _simple_new(cls, values, name=None, dtype=None, **kwargs):
"""
we require the we have a dtype compat for the values
if we are passed a non-dtype compat, then coerce using the constructor
Must be careful not to recurse.
"""
if not hasattr(values, 'dtype'):
if (values is None or not len(values)) and dtype is not None:
values = np.empty(0, dtype=dtype)
else:
values = np.array(values, copy=False)
if is_object_dtype(values):
values = cls(values, name=name, dtype=dtype,
**kwargs)._ndarray_values
result = object.__new__(cls)
result._data = values
result.name = name
for k, v in compat.iteritems(kwargs):
setattr(result, k, v)
return result._reset_identity()
_index_shared_docs['_shallow_copy'] = """
create a new Index with the same class as the caller, don't copy the
data, use the same object attributes with passed in attributes taking
precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
@Appender(_index_shared_docs['_shallow_copy'])
def _shallow_copy(self, values=None, **kwargs):
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
return self._simple_new(values, **attributes)
def _shallow_copy_with_infer(self, values=None, **kwargs):
"""
create a new Index inferring the class with passed value, don't copy
the data, use the same object attributes with passed in attributes
taking precedence
*this is an internal non-public method*
Parameters
----------
values : the values to create the new Index, optional
kwargs : updates the default attributes for this Index
"""
if values is None:
values = self.values
attributes = self._get_attributes_dict()
attributes.update(kwargs)
attributes['copy'] = False
if not len(values) and 'dtype' not in kwargs:
attributes['dtype'] = self.dtype
if self._infer_as_myclass:
try:
return self._constructor(values, **attributes)
except (TypeError, ValueError):
pass
return Index(values, **attributes)
def _deepcopy_if_needed(self, orig, copy=False):
"""
.. versionadded:: 0.19.0
Make a copy of self if data coincides (in memory) with orig.
Subclasses should override this if self._base is not an ndarray.
Parameters
----------
orig : ndarray
other ndarray to compare self._data against
copy : boolean, default False
when False, do not run any check, just return self
Returns
-------
A copy of self if needed, otherwise self : Index
"""
if copy:
# Retrieve the "base objects", i.e. the original memory allocations
if not isinstance(orig, np.ndarray):
# orig is a DatetimeIndex
orig = orig.values
orig = orig if orig.base is None else orig.base
new = self._data if self._data.base is None else self._data.base
if orig is new:
return self.copy(deep=True)
return self
def _update_inplace(self, result, **kwargs):
# guard when called from IndexOpsMixin
raise TypeError("Index can't be updated inplace")
def _sort_levels_monotonic(self):
""" compat with MultiIndex """
return self
_index_shared_docs['_get_grouper_for_level'] = """
Get index grouper corresponding to an index level
Parameters
----------
mapper: Group mapping function or None
Function mapping index values to groups
level : int or None
Index level
Returns
-------
grouper : Index
Index of values to group on
labels : ndarray of int or None
Array of locations in level_index
uniques : Index or None
Index of unique values for level
"""
@Appender(_index_shared_docs['_get_grouper_for_level'])
def _get_grouper_for_level(self, mapper, level=None):
assert level is None or level == 0
if mapper is None:
grouper = self
else:
grouper = self.map(mapper)
return grouper, None, None
def is_(self, other):
"""
More flexible, faster check like ``is`` but that works through views
Note: this is *not* the same as ``Index.identical()``, which checks
that metadata is also the same.
Parameters
----------
other : object
other object to compare against.
Returns
-------
True if both have same underlying data, False otherwise : bool
"""
# use something other than None to be clearer
return self._id is getattr(
other, '_id', Ellipsis) and self._id is not None
def _reset_identity(self):
"""Initializes or resets ``_id`` attribute with new object"""
self._id = _Identity()
return self
# ndarray compat
def __len__(self):
"""
return the length of the Index
"""
return len(self._data)
def __array__(self, dtype=None):
""" the array interface, return my values """
return self._data.view(np.ndarray)
def __array_wrap__(self, result, context=None):
"""
Gets called after a ufunc
"""
if is_bool_dtype(result):
return result
attrs = self._get_attributes_dict()
attrs = self._maybe_update_attributes(attrs)
return Index(result, **attrs)
@cache_readonly
def dtype(self):
""" return the dtype object of the underlying data """
return self._data.dtype
@cache_readonly
def dtype_str(self):
""" return the dtype str of the underlying data """
return str(self.dtype)
@property
def values(self):
""" return the underlying data as an ndarray """
return self._data.view(np.ndarray)
@property
def _values(self):
# type: () -> Union[ExtensionArray, Index]
# TODO(EA): remove index types as they become extension arrays
"""The best array representation.
This is an ndarray, ExtensionArray, or Index subclass. This differs
from ``_ndarray_values``, which always returns an ndarray.
Both ``_values`` and ``_ndarray_values`` are consistent between
``Series`` and ``Index``.
It may differ from the public '.values' method.
index | values | _values | _ndarray_values |
        ----------------- | --------------- | ------------- | --------------- |
CategoricalIndex | Categorical | Categorical | codes |
DatetimeIndex[tz] | ndarray[M8ns] | DTI[tz] | ndarray[M8ns] |
For the following, the ``._values`` is currently ``ndarray[object]``,
but will soon be an ``ExtensionArray``
index | values | _values | _ndarray_values |
----------------- | --------------- | ------------ | --------------- |
PeriodIndex | ndarray[object] | ndarray[obj] | ndarray[int] |
IntervalIndex | ndarray[object] | ndarray[obj] | ndarray[object] |
See Also
--------
values
_ndarray_values
"""
return self.values
def get_values(self):
"""
Return `Index` data as an `numpy.ndarray`.
Returns
-------
numpy.ndarray
A one-dimensional numpy array of the `Index` values.
See Also
--------
Index.values : The attribute that get_values wraps.
Examples
--------
Getting the `Index` values of a `DataFrame`:
>>> df = pd.DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]],
... index=['a', 'b', 'c'], columns=['A', 'B', 'C'])
>>> df
A B C
a 1 2 3
b 4 5 6
c 7 8 9
>>> df.index.get_values()
array(['a', 'b', 'c'], dtype=object)
Standalone `Index` values:
>>> idx = pd.Index(['1', '2', '3'])
>>> idx.get_values()
array(['1', '2', '3'], dtype=object)
`MultiIndex` arrays also have only one dimension:
>>> midx = pd.MultiIndex.from_arrays([[1, 2, 3], ['a', 'b', 'c']],
... names=('number', 'letter'))
>>> midx.get_values()
array([(1, 'a'), (2, 'b'), (3, 'c')], dtype=object)
>>> midx.get_values().ndim
1
"""
return self.values
@Appender(IndexOpsMixin.memory_usage.__doc__)
def memory_usage(self, deep=False):
result = super(Index, self).memory_usage(deep=deep)
# include our engine hashtable
result += self._engine.sizeof(deep=deep)
return result
# ops compat
@deprecate_kwarg(old_arg_name='n', new_arg_name='repeats')
def repeat(self, repeats, *args, **kwargs):
"""
Repeat elements of an Index.
Returns a new index where each element of the current index
is repeated consecutively a given number of times.
Parameters
----------
repeats : int
The number of repetitions for each element.
**kwargs
Additional keywords have no effect but might be accepted for
compatibility with numpy.
Returns
-------
pandas.Index
Newly created Index with repeated elements.
See Also
--------
Series.repeat : Equivalent function for Series
numpy.repeat : Underlying implementation
Examples
--------
>>> idx = pd.Index([1, 2, 3])
>>> idx
Int64Index([1, 2, 3], dtype='int64')
>>> idx.repeat(2)
Int64Index([1, 1, 2, 2, 3, 3], dtype='int64')
>>> idx.repeat(3)
Int64Index([1, 1, 1, 2, 2, 2, 3, 3, 3], dtype='int64')
"""
nv.validate_repeat(args, kwargs)
return self._shallow_copy(self._values.repeat(repeats))
_index_shared_docs['where'] = """
.. versionadded:: 0.19.0
Return an Index of same shape as self and whose corresponding
entries are from self where cond is True and otherwise are from
other.
Parameters
----------
cond : boolean array-like with the same length as self
other : scalar, or array-like
"""
@Appender(_index_shared_docs['where'])
def where(self, cond, other=None):
if other is None:
other = self._na_value
dtype = self.dtype
values = self.values
if is_bool(other) or is_bool_dtype(other):
# bools force casting
values = values.astype(object)
dtype = None
values = np.where(cond, values, other)
if self._is_numeric_dtype and np.any(isna(values)):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return self._shallow_copy_with_infer(values, dtype=dtype)
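# Usage sketch for ``Index.where`` (added for illustration; output reprs may
# vary slightly by version):
#   >>> idx = pd.Index([1, 2, 3, 4])
#   >>> idx.where(idx > 2)          # masked entries become the NA value
#   Float64Index([nan, nan, 3.0, 4.0], dtype='float64')
#   >>> idx.where(idx > 2, 0)       # or are taken from ``other``
#   Int64Index([0, 0, 3, 4], dtype='int64')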
def ravel(self, order='C'):
"""
return an ndarray of the flattened values of the underlying data
See also
--------
numpy.ndarray.ravel
"""
return self._ndarray_values.ravel(order=order)
# construction helpers
@classmethod
def _try_convert_to_int_index(cls, data, copy, name, dtype):
"""
Attempt to convert an array of data into an integer index.
Parameters
----------
data : The data to convert.
copy : Whether to copy the data or not.
name : The name of the index returned.
Returns
-------
int_index : data converted to either an Int64Index or a
UInt64Index
Raises
------
ValueError if the conversion was not successful.
"""
from .numeric import Int64Index, UInt64Index
if not is_unsigned_integer_dtype(dtype):
# skip int64 conversion attempt if uint-like dtype is passed, as
# this could return Int64Index when UInt64Index is what's desired
try:
res = data.astype('i8', copy=False)
if (res == data).all():
return Int64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
# Conversion to int64 failed (possibly due to overflow) or was skipped,
# so let's try now with uint64.
try:
res = data.astype('u8', copy=False)
if (res == data).all():
return UInt64Index(res, copy=copy, name=name)
except (OverflowError, TypeError, ValueError):
pass
raise ValueError
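# Illustrative behaviour of this private helper (sketch, not part of the
# original source):
#   >>> Index._try_convert_to_int_index(np.array([1.0, 2.0]), False, None, None)
#   Int64Index([1, 2], dtype='int64')    # whole floats round-trip to int64
#   >>> Index._try_convert_to_int_index(np.array([1.5, 2.0]), False, None, None)
#   Traceback (most recent call last):
#   ValueError                           # lossy conversions are refused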
@classmethod
def _scalar_data_error(cls, data):
raise TypeError('{0}(...) must be called with a collection of some '
'kind, {1} was passed'.format(cls.__name__,
repr(data)))
@classmethod
def _string_data_error(cls, data):
raise TypeError('String dtype not supported, you may need '
'to explicitly cast to a numeric type')
@classmethod
def _coerce_to_ndarray(cls, data):
"""coerces data to ndarray, raises on scalar data. Converts other
iterables to list first and then to array. Does not touch ndarrays.
"""
if not isinstance(data, (np.ndarray, Index)):
if data is None or is_scalar(data):
cls._scalar_data_error(data)
# other iterable of some kind
if not isinstance(data, (ABCSeries, list, tuple)):
data = list(data)
data = np.asarray(data)
return data
def _get_attributes_dict(self):
""" return an attributes dict for my class """
return {k: getattr(self, k, None) for k in self._attributes}
def view(self, cls=None):
# we need to see if we are subclassing an
# index type here
if cls is not None and not hasattr(cls, '_typ'):
result = self._data.view(cls)
else:
result = self._shallow_copy()
if isinstance(result, Index):
result._id = self._id
return result
def _coerce_scalar_to_index(self, item):
"""
we need to coerce a scalar to something compatible with our index type
Parameters
----------
item : scalar item to coerce
"""
dtype = self.dtype
if self._is_numeric_dtype and isna(item):
# We can't coerce to the numeric dtype of "self" (unless
# it's float) if there are NaN values in our output.
dtype = None
return Index([item], dtype=dtype, **self._get_attributes_dict())
_index_shared_docs['copy'] = """
Make a copy of this object. Name and dtype set those attributes on
the new object.
Parameters
----------
name : string, optional
deep : boolean, default False
dtype : numpy dtype or pandas type
Returns
-------
copy : Index
Notes
-----
In most cases, there should be no functional difference from using
``deep``, but if ``deep`` is passed it will attempt to deepcopy.
"""
@Appender(_index_shared_docs['copy'])
def copy(self, name=None, deep=False, dtype=None, **kwargs):
if deep:
new_index = self._shallow_copy(self._data.copy())
else:
new_index = self._shallow_copy()
names = kwargs.get('names')
names = self._validate_names(name=name, names=names, deep=deep)
new_index = new_index.set_names(names)
if dtype:
new_index = new_index.astype(dtype)
return new_index
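# Usage sketch (illustrative only; reprs approximate):
#   >>> idx = pd.Index([1, 2, 3], name='x')
#   >>> idx.copy(name='y')
#   Int64Index([1, 2, 3], dtype='int64', name='y')
#   >>> idx.copy(dtype='float64')        # routed through ``astype``
#   Float64Index([1.0, 2.0, 3.0], dtype='float64', name='x')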
def __copy__(self, **kwargs):
return self.copy(**kwargs)
def __deepcopy__(self, memo=None):
if memo is None:
memo = {}
return self.copy(deep=True)
def _validate_names(self, name=None, names=None, deep=False):
"""
Handles the quirks of having a singular 'name' parameter for general
Index and plural 'names' parameter for MultiIndex.
"""
from copy import deepcopy
if names is not None and name is not None:
raise TypeError("Can only provide one of `names` and `name`")
elif names is None and name is None:
return deepcopy(self.names) if deep else self.names
elif names is not None:
if not is_list_like(names):
raise TypeError("Must pass list-like as `names`.")
return names
else:
if not is_list_like(name):
return [name]
return name
def __unicode__(self):
"""
Return a string representation for this object.
Invoked by unicode(df) in py2 only. Yields a Unicode String in both
py2/py3.
"""
klass = self.__class__.__name__
data = self._format_data()
attrs = self._format_attrs()
space = self._format_space()
prepr = (u(",%s") %
space).join(u("%s=%s") % (k, v) for k, v in attrs)
# no data provided, just attributes
if data is None:
data = ''
res = u("%s(%s%s)") % (klass, data, prepr)
return res
def _format_space(self):
# using space here controls if the attributes
# are line separated or not (the default)
# max_seq_items = get_option('display.max_seq_items')
# if len(self) > max_seq_items:
# space = "\n%s" % (' ' * (len(klass) + 1))
return " "
@property
def _formatter_func(self):
"""
Return the formatter function
"""
return default_pprint
def _format_data(self, name=None):
"""
Return the formatted data as a unicode string
"""
# do we want to justify (only do so for non-objects)
is_justify = not (self.inferred_type in ('string', 'unicode') or
(self.inferred_type == 'categorical' and
is_object_dtype(self.categories)))
return format_object_summary(self, self._formatter_func,
is_justify=is_justify, name=name)
def _format_attrs(self):
"""
Return a list of tuples of the (attr,formatted_value)
"""
return format_object_attrs(self)
def to_series(self, index=None, name=None):
"""
Create a Series with both index and values equal to the index keys.
Useful with map for returning an indexer based on an index.
Parameters
----------
index : Index, optional
index of resulting Series. If None, defaults to original index
name : string, optional
name of resulting Series. If None, defaults to name of original
index
Returns
-------
Series : dtype will be based on the type of the Index values.
"""
from pandas import Series
if index is None:
index = self._shallow_copy()
if name is None:
name = self.name
return Series(self._to_embed(), index=index, name=name)
def to_frame(self, index=True):
"""
Create a DataFrame with a column containing the Index.
.. versionadded:: 0.21.0
Parameters
----------
index : boolean, default True
Set the index of the returned DataFrame as the original Index.
Returns
-------
DataFrame
DataFrame containing the original Index data.
See Also
--------
Index.to_series : Convert an Index to a Series.
Series.to_frame : Convert Series to DataFrame.
Examples
--------
>>> idx = pd.Index(['Ant', 'Bear', 'Cow'], name='animal')
>>> idx.to_frame()
animal
animal
Ant Ant
Bear Bear
Cow Cow
By default, the original Index is reused. To enforce a new Index:
>>> idx.to_frame(index=False)
animal
0 Ant
1 Bear
2 Cow
"""
from pandas import DataFrame
result = DataFrame(self._shallow_copy(), columns=[self.name or 0])
if index:
result.index = self
return result
def _to_embed(self, keep_tz=False, dtype=None):
"""
*this is an internal non-public method*
return an array repr of this object, potentially casting to object
"""
if dtype is not None:
return self.astype(dtype)._to_embed(keep_tz=keep_tz)
return self.values.copy()
_index_shared_docs['astype'] = """
Create an Index with values cast to dtypes. The class of a new Index
is determined by dtype. When conversion is impossible, a ValueError
exception is raised.
Parameters
----------
dtype : numpy dtype or pandas type
copy : bool, default True
By default, astype always returns a newly allocated object.
If copy is set to False and internal requirements on dtype are
satisfied, the original data is used to create a new Index
or the original Index is returned.
.. versionadded:: 0.19.0
"""
@Appender(_index_shared_docs['astype'])
def astype(self, dtype, copy=True):
if is_dtype_equal(self.dtype, dtype):
return self.copy() if copy else self
elif is_categorical_dtype(dtype):
from .category import CategoricalIndex
return CategoricalIndex(self.values, name=self.name, dtype=dtype,
copy=copy)
try:
return Index(self.values.astype(dtype, copy=copy), name=self.name,
dtype=dtype)
except (TypeError, ValueError):
msg = 'Cannot cast {name} to dtype {dtype}'
raise TypeError(msg.format(name=type(self).__name__, dtype=dtype))
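# Example of ``Index.astype`` (added for illustration; reprs approximate):
#   >>> idx = pd.Index([1, 2, 3])
#   >>> idx.astype('float64')
#   Float64Index([1.0, 2.0, 3.0], dtype='float64')
#   >>> idx.astype('category')           # dispatched to CategoricalIndex above
#   CategoricalIndex([1, 2, 3], categories=[1, 2, 3], ordered=False,
#                    dtype='category')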
def _to_safe_for_reshape(self):
""" convert to object if we are a categorical """
return self
def _assert_can_do_setop(self, other):
if not is_list_like(other):
raise TypeError('Input must be Index or array-like')
return True
def _convert_can_do_setop(self, other):
if not isinstance(other, Index):
other = Index(other, name=self.name)
result_name = self.name
else:
result_name = self.name if self.name == other.name else None
return other, result_name
def _convert_for_op(self, value):
""" Convert value to be insertable to ndarray """
return value
def _assert_can_do_op(self, value):
""" Check value is valid for scalar op """
if not is_scalar(value):
msg = "'value' must be a scalar, passed: {0}"
raise TypeError(msg.format(type(value).__name__))
@property
def nlevels(self):
return 1
def _get_names(self):
return FrozenList((self.name, ))
def _set_names(self, values, level=None):
"""
Set new names on index. Each name has to be a hashable type.
Parameters
----------
values : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
Raises
------
TypeError if each name is not hashable.
"""
if not is_list_like(values):
raise ValueError('Names must be a list-like')
if len(values) != 1:
raise ValueError('Length of new names must be 1, got %d' %
len(values))
# GH 20527
# All items in 'name' need to be hashable:
for name in values:
if not is_hashable(name):
raise TypeError('{}.name must be a hashable type'
.format(self.__class__.__name__))
self.name = values[0]
names = property(fset=_set_names, fget=_get_names)
def set_names(self, names, level=None, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
names : str or sequence
name(s) to set
level : int, level name, or sequence of int/level names (default None)
If the index is a MultiIndex (hierarchical), level(s) to set (None
for all levels). Otherwise level must be None
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
Examples
--------
>>> Index([1, 2, 3, 4]).set_names('foo')
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> Index([1, 2, 3, 4]).set_names(['foo'])
Int64Index([1, 2, 3, 4], dtype='int64', name='foo')
>>> idx = MultiIndex.from_tuples([(1, u'one'), (1, u'two'),
(2, u'one'), (2, u'two')],
names=['foo', 'bar'])
>>> idx.set_names(['baz', 'quz'])
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'quz'])
>>> idx.set_names('baz', level=0)
MultiIndex(levels=[[1, 2], [u'one', u'two']],
labels=[[0, 0, 1, 1], [0, 1, 0, 1]],
names=[u'baz', u'bar'])
"""
from .multi import MultiIndex
if level is not None and not isinstance(self, MultiIndex):
raise ValueError('Level must be None for non-MultiIndex')
if level is not None and not is_list_like(level) and is_list_like(
names):
raise TypeError("Names must be a string")
if not is_list_like(names) and level is None and self.nlevels > 1:
raise TypeError("Must pass list-like as `names`.")
if not is_list_like(names):
names = [names]
if level is not None and not is_list_like(level):
level = [level]
if inplace:
idx = self
else:
idx = self._shallow_copy()
idx._set_names(names, level=level)
if not inplace:
return idx
def rename(self, name, inplace=False):
"""
Set new names on index. Defaults to returning new index.
Parameters
----------
name : str or list
name to set
inplace : bool
if True, mutates in place
Returns
-------
new index (of same type and class...etc) [if inplace, returns None]
"""
return self.set_names([name], inplace=inplace)
@property
def _has_complex_internals(self):
# to disable groupby tricks in MultiIndex
return False
def _summary(self, name=None):
"""
Return a summarized representation
Parameters
----------
name : str
name to use in the summary representation
Returns
-------
String with a summarized representation of the index
"""
if len(self) > 0:
head = self[0]
if (hasattr(head, 'format') and
not isinstance(head, compat.string_types)):
head = head.format()
tail = self[-1]
if (hasattr(tail, 'format') and
not isinstance(tail, compat.string_types)):
tail = tail.format()
index_summary = ', %s to %s' % (pprint_thing(head),
pprint_thing(tail))
else:
index_summary = ''
if name is None:
name = type(self).__name__
return '%s: %s entries%s' % (name, len(self), index_summary)
def summary(self, name=None):
"""
Return a summarized representation
.. deprecated:: 0.23.0
"""
warnings.warn("'summary' is deprecated and will be removed in a "
"future version.", FutureWarning, stacklevel=2)
return self._summary(name)
def _mpl_repr(self):
# how to represent ourselves to matplotlib
return self.values
_na_value = np.nan
"""The expected NA value to use with this index."""
# introspection
@property
def is_monotonic(self):
""" alias for is_monotonic_increasing (deprecated) """
return self.is_monotonic_increasing
@property
def is_monotonic_increasing(self):
"""
return if the index is monotonic increasing (only equal or
increasing) values.
Examples
--------
>>> Index([1, 2, 3]).is_monotonic_increasing
True
>>> Index([1, 2, 2]).is_monotonic_increasing
True
>>> Index([1, 3, 2]).is_monotonic_increasing
False
"""
return self._engine.is_monotonic_increasing
@property
def is_monotonic_decreasing(self):
"""
return if the index is monotonic decreasing (only equal or
decreasing) values.
Examples
--------
>>> Index([3, 2, 1]).is_monotonic_decreasing
True
>>> Index([3, 2, 2]).is_monotonic_decreasing
True
>>> Index([3, 1, 2]).is_monotonic_decreasing
False
"""
return self._engine.is_monotonic_decreasing
@property
def _is_strictly_monotonic_increasing(self):
"""return if the index is strictly monotonic increasing
(only increasing) values
Examples
--------
>>> Index([1, 2, 3])._is_strictly_monotonic_increasing
True
>>> Index([1, 2, 2])._is_strictly_monotonic_increasing
False
>>> Index([1, 3, 2])._is_strictly_monotonic_increasing
False
"""
return self.is_unique and self.is_monotonic_increasing
@property
def _is_strictly_monotonic_decreasing(self):
"""return if the index is strictly monotonic decreasing
(only decreasing) values
Examples
--------
>>> Index([3, 2, 1])._is_strictly_monotonic_decreasing
True
>>> Index([3, 2, 2])._is_strictly_monotonic_decreasing
False
>>> Index([3, 1, 2])._is_strictly_monotonic_decreasing
False
"""
return self.is_unique and self.is_monotonic_decreasing
def is_lexsorted_for_tuple(self, tup):
return True
@cache_readonly
def is_unique(self):
""" return if the index has unique values """
return self._engine.is_unique
@property
def has_duplicates(self):
return not self.is_unique
def is_boolean(self):
return self.inferred_type in ['boolean']
def is_integer(self):
return self.inferred_type in ['integer']
def is_floating(self):
return self.inferred_type in ['floating', 'mixed-integer-float']
def is_numeric(self):
return self.inferred_type in ['integer', 'floating']
def is_object(self):
return is_object_dtype(self.dtype)
def is_categorical(self):
"""
Check if the Index holds categorical data.
Returns
-------
boolean
True if the Index is categorical.
See Also
--------
CategoricalIndex : Index for categorical data.
Examples
--------
>>> idx = pd.Index(["Watermelon", "Orange", "Apple",
... "Watermelon"]).astype("category")
>>> idx.is_categorical()
True
>>> idx = pd.Index([1, 3, 5, 7])
>>> idx.is_categorical()
False
>>> s = pd.Series(["Peter", "Victor", "Elisabeth", "Mar"])
>>> s
0 Peter
1 Victor
2 Elisabeth
3 Mar
dtype: object
>>> s.index.is_categorical()
False
"""
return self.inferred_type in ['categorical']
def is_interval(self):
return self.inferred_type in ['interval']
def is_mixed(self):
return self.inferred_type in ['mixed']
def holds_integer(self):
return self.inferred_type in ['integer', 'mixed-integer']
_index_shared_docs['_convert_scalar_indexer'] = """
Convert a scalar indexer.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_scalar_indexer'])
def _convert_scalar_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
if kind == 'iloc':
return self._validate_indexer('positional', key, kind)
if len(self) and not isinstance(self, ABCMultiIndex):
# we can raise here if we are definitive that this
# is positional indexing (eg. .ix on with a float)
# or label indexing if we are using a type able
# to be represented in the index
if kind in ['getitem', 'ix'] and is_float(key):
if not self.is_floating():
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_float(key):
# we want to raise KeyError on string/mixed here
# technically we *could* raise a TypeError
# on anything but mixed though
if self.inferred_type not in ['floating',
'mixed-integer-float',
'string',
'unicode',
'mixed']:
return self._invalid_indexer('label', key)
elif kind in ['loc'] and is_integer(key):
if not self.holds_integer():
return self._invalid_indexer('label', key)
return key
_index_shared_docs['_convert_slice_indexer'] = """
Convert a slice indexer.
By definition, these are labels unless 'iloc' is passed in.
Floats are not allowed as the start, step, or stop of the slice.
Parameters
----------
key : label of the slice bound
kind : {'ix', 'loc', 'getitem', 'iloc'} or None
"""
@Appender(_index_shared_docs['_convert_slice_indexer'])
def _convert_slice_indexer(self, key, kind=None):
assert kind in ['ix', 'loc', 'getitem', 'iloc', None]
# if we are not a slice, then we are done
if not isinstance(key, slice):
return key
# validate iloc
if kind == 'iloc':
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# potentially cast the bounds to integers
start, stop, step = key.start, key.stop, key.step
# figure out if this is a positional indexer
def is_int(v):
return v is None or is_integer(v)
is_null_slicer = start is None and stop is None
is_index_slice = is_int(start) and is_int(stop)
is_positional = is_index_slice and not self.is_integer()
if kind == 'getitem':
"""
called from the getitem slicers, validate that we are in fact
integers
"""
if self.is_integer() or is_index_slice:
return slice(self._validate_indexer('slice', key.start, kind),
self._validate_indexer('slice', key.stop, kind),
self._validate_indexer('slice', key.step, kind))
# convert the slice to an indexer here
# if we are mixed and have integers
try:
if is_positional and self.is_mixed():
# TODO: i, j are not used anywhere
if start is not None:
i = self.get_loc(start) # noqa
if stop is not None:
j = self.get_loc(stop) # noqa
is_positional = False
except KeyError:
if self.inferred_type == 'mixed-integer-float':
raise
if is_null_slicer:
indexer = key
elif is_positional:
indexer = key
else:
try:
indexer = self.slice_indexer(start, stop, step, kind=kind)
except Exception:
if is_index_slice:
if self.is_integer():
raise
else:
indexer = key
else:
raise
return indexer
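# Sketch of what this private converter produces (illustrative only):
#   >>> idx = pd.Index(list('abcde'))
#   >>> idx._convert_slice_indexer(slice('b', 'd'), kind='loc')
#   slice(1, 4, None)        # label slice mapped to positions, stop inclusive
# On an integer Index the same 'loc' slice bounds stay label-based, while a
# 'getitem'/'iloc' slice of ints is validated as purely positional instead.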
def _convert_listlike_indexer(self, keyarr, kind=None):
"""
Parameters
----------
keyarr : list-like
Indexer to convert.
Returns
-------
tuple (indexer, keyarr)
indexer is an ndarray or None if cannot convert
keyarr are tuple-safe keys
"""
if isinstance(keyarr, Index):
keyarr = self._convert_index_indexer(keyarr)
else:
keyarr = self._convert_arr_indexer(keyarr)
indexer = self._convert_list_indexer(keyarr, kind=kind)
return indexer, keyarr
_index_shared_docs['_convert_arr_indexer'] = """
Convert an array-like indexer to the appropriate dtype.
Parameters
----------
keyarr : array-like
Indexer to convert.
Returns
-------
converted_keyarr : array-like
"""
@Appender(_index_shared_docs['_convert_arr_indexer'])
def _convert_arr_indexer(self, keyarr):
keyarr = com._asarray_tuplesafe(keyarr)
return keyarr
_index_shared_docs['_convert_index_indexer'] = """
Convert an Index indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
Returns
-------
converted_keyarr : Index (or sub-class)
"""
@Appender(_index_shared_docs['_convert_index_indexer'])
def _convert_index_indexer(self, keyarr):
return keyarr
_index_shared_docs['_convert_list_indexer'] = """
Convert a list-like indexer to the appropriate dtype.
Parameters
----------
keyarr : Index (or sub-class)
Indexer to convert.
kind : iloc, ix, loc, optional
Returns
-------
positional indexer or None
"""
@Appender(_index_shared_docs['_convert_list_indexer'])
def _convert_list_indexer(self, keyarr, kind=None):
if (kind in [None, 'iloc', 'ix'] and
is_integer_dtype(keyarr) and not self.is_floating() and
not isinstance(keyarr, ABCPeriodIndex)):
if self.inferred_type == 'mixed-integer':
indexer = self.get_indexer(keyarr)
if (indexer >= 0).all():
return indexer
# missing values are flagged as -1 by get_indexer and negative
# indices are already converted to positive indices in the
# above if-statement, so the negative flags are changed to
# values outside the range of indices so as to trigger an
# IndexError in maybe_convert_indices
indexer[indexer < 0] = len(self)
from pandas.core.indexing import maybe_convert_indices
return maybe_convert_indices(indexer, len(self))
elif not self.inferred_type == 'integer':
keyarr = np.where(keyarr < 0, len(self) + keyarr, keyarr)
return keyarr
return None
def _invalid_indexer(self, form, key):
""" consistent invalid indexer message """
raise TypeError("cannot do {form} indexing on {klass} with these "
"indexers [{key}] of {kind}".format(
form=form, klass=type(self), key=key,
kind=type(key)))
def get_duplicates(self):
"""
Extract duplicated index elements.
Returns a sorted list of index elements which appear more than once in
the index.
.. deprecated:: 0.23.0
Use idx[idx.duplicated()].unique() instead
Returns
-------
array-like
List of duplicated indexes.
See Also
--------
Index.duplicated : Return boolean array denoting duplicates.
Index.drop_duplicates : Return Index with duplicates removed.
Examples
--------
Works on different types of Index.
>>> pd.Index([1, 2, 2, 3, 3, 3, 4]).get_duplicates()
[2, 3]
>>> pd.Index([1., 2., 2., 3., 3., 3., 4.]).get_duplicates()
[2.0, 3.0]
>>> pd.Index(['a', 'b', 'b', 'c', 'c', 'c', 'd']).get_duplicates()
['b', 'c']
Note that for a DatetimeIndex, it does not return a list but a new
DatetimeIndex:
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03',
... '2018-01-03', '2018-01-04', '2018-01-04'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex(['2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', freq=None)
Sorts duplicated elements even when indexes are unordered.
>>> pd.Index([1, 2, 3, 2, 3, 4, 3]).get_duplicates()
[2, 3]
Return empty array-like structure when all elements are unique.
>>> pd.Index([1, 2, 3, 4]).get_duplicates()
[]
>>> dates = pd.to_datetime(['2018-01-01', '2018-01-02', '2018-01-03'],
... format='%Y-%m-%d')
>>> pd.Index(dates).get_duplicates()
DatetimeIndex([], dtype='datetime64[ns]', freq=None)
"""
warnings.warn("'get_duplicates' is deprecated and will be removed in "
"a future release. You can use "
"idx[idx.duplicated()].unique() instead",
FutureWarning, stacklevel=2)
return self[self.duplicated()].unique()
def _cleanup(self):
self._engine.clear_mapping()
@cache_readonly
def _constructor(self):
return type(self)
@cache_readonly
def _engine(self):
# property, for now, slow to look up
return self._engine_type(lambda: self._ndarray_values, len(self))
def _validate_index_level(self, level):
"""
Validate index level.
For single-level Index getting level number is a no-op, but some
verification must be done like in MultiIndex.
"""
if isinstance(level, int):
if level < 0 and level != -1:
raise IndexError("Too many levels: Index has only 1 level,"
" %d is not a valid level number" % (level, ))
elif level > 0:
raise IndexError("Too many levels:"
" Index has only 1 level, not %d" %
(level + 1))
elif level != self.name:
raise KeyError('Level %s must be same as name (%s)' %
(level, self.name))
def _get_level_number(self, level):
self._validate_index_level(level)
return 0
@cache_readonly
def inferred_type(self):
""" return a string of the type inferred from the values """
return lib.infer_dtype(self)
def _is_memory_usage_qualified(self):
""" return a boolean if we need a qualified .info display """
return self.is_object()
def is_type_compatible(self, kind):
return kind == self.inferred_type
@cache_readonly
def is_all_dates(self):
if self._data is None:
return False
return is_datetime_array(_ensure_object(self.values))
def __reduce__(self):
d = dict(data=self._data)
d.update(self._get_attributes_dict())
return _new_Index, (self.__class__, d), None
def __setstate__(self, state):
"""Necessary for making this object picklable"""
if isinstance(state, dict):
self._data = state.pop('data')
for k, v in compat.iteritems(state):
setattr(self, k, v)
elif isinstance(state, tuple):
if len(state) == 2:
nd_state, own_state = state
data = np.empty(nd_state[1], dtype=nd_state[2])
np.ndarray.__setstate__(data, nd_state)
self.name = own_state[0]
else: # pragma: no cover
data = np.empty(state)
np.ndarray.__setstate__(data, state)
self._data = data
self._reset_identity()
else:
raise Exception("invalid pickle state")
_unpickle_compat = __setstate__
def __nonzero__(self):
raise ValueError("The truth value of a {0} is ambiguous. "
"Use a.empty, a.bool(), a.item(), a.any() or a.all()."
.format(self.__class__.__name__))
__bool__ = __nonzero__
_index_shared_docs['__contains__'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['__contains__'] % _index_doc_kwargs)
def __contains__(self, key):
hash(key)
try:
return key in self._engine
except (OverflowError, TypeError, ValueError):
return False
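# Behaviour sketch (added for illustration):
#   >>> 1 in pd.Index([1, 2, 3])
#   True
#   >>> 'a' in pd.Index([1, 2, 3])
#   False                     # engine lookup errors are swallowed -> False
#   >>> [1, 2] in pd.Index([1, 2, 3])
#   TypeError                 # unhashable keys still raise, via hash(key)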
_index_shared_docs['contains'] = """
return a boolean if this key is IN the index
Parameters
----------
key : object
Returns
-------
boolean
"""
@Appender(_index_shared_docs['contains'] % _index_doc_kwargs)
def contains(self, key):
hash(key)
try:
return key in self._engine
except (TypeError, ValueError):
return False
def __hash__(self):
raise TypeError("unhashable type: %r" % type(self).__name__)
def __setitem__(self, key, value):
raise TypeError("Index does not support mutable operations")
def __getitem__(self, key):
"""
Override numpy.ndarray's __getitem__ method to work as desired.
This function adds lists and Series as valid boolean indexers
(ndarrays only supports ndarray with dtype=bool).
If resulting ndim != 1, plain ndarray is returned instead of
corresponding `Index` subclass.
"""
# There's no custom logic to be implemented in __getslice__, so it's
# not overloaded intentionally.
getitem = self._data.__getitem__
promote = self._shallow_copy
if is_scalar(key):
return getitem(key)
if isinstance(key, slice):
# This case is separated from the conditional above to avoid
# pessimization of basic indexing.
return promote(getitem(key))
if com.is_bool_indexer(key):
key = np.asarray(key)
key = com._values_from_object(key)
result = getitem(key)
if not is_scalar(result):
return promote(result)
else:
return result
def _can_hold_identifiers_and_holds_name(self, name):
"""
Faster check for ``name in self`` when we know `name` is a Python
identifier (e.g. in NDFrame.__getattr__, which hits this to support
. key lookup). For indexes that can't hold identifiers (everything
but object & categorical) we just return False.
https://github.com/pandas-dev/pandas/issues/19764
"""
if self.is_object() or self.is_categorical():
return name in self
return False
def append(self, other):
"""
Append a collection of Index objects together
Parameters
----------
other : Index or list/tuple of indices
Returns
-------
appended : Index
"""
to_concat = [self]
if isinstance(other, (list, tuple)):
to_concat = to_concat + list(other)
else:
to_concat.append(other)
for obj in to_concat:
if not isinstance(obj, Index):
raise TypeError('all inputs must be Index')
names = {obj.name for obj in to_concat}
name = None if len(names) > 1 else self.name
return self._concat(to_concat, name)
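# Examples for ``Index.append`` (added for illustration; reprs approximate):
#   >>> pd.Index([1, 2]).append(pd.Index([3, 4]))
#   Int64Index([1, 2, 3, 4], dtype='int64')
#   >>> pd.Index([1, 2]).append([pd.Index([3]), pd.Index(['a'])])
#   Index([1, 2, 3, 'a'], dtype='object')    # mixed dtypes fall back to object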
def _concat(self, to_concat, name):
typs = _concat.get_dtype_kinds(to_concat)
if len(typs) == 1:
return self._concat_same_dtype(to_concat, name=name)
return | _concat._concat_index_asobject(to_concat, name=name) | pandas.core.dtypes.concat._concat_index_asobject |
# coding=utf-8
# Author: <NAME>
# Date: Jul 05, 2019
#
# Description: Maps DE genes to String-DB. Keeps only those genes that we want.
#
# NOTE: For some reason, "dmelanogaster_gene_ensembl" did not retrieve all gene names. Some were manually added at the end.
#
import math
import pandas as pd
pd.set_option('display.max_rows', 100)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from utils import open_undefined_last_column_files, ensurePathExists
from pybiomart import Dataset
def combine_id_string_x_with_id_string_y(r):
x = r['id_string_x']
y = r['id_string_y']
if isinstance(x, list):
return x
elif not pd.isna(x):
return x
else:
return y
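# Illustrative note (not in the original script): after the outer merges below,
# 'id_string_x' comes from the Cyte-vs-Gonia table and 'id_string_y' from the
# Tid-vs-Cyte table; the helper keeps x when it is a list or a non-NaN value
# and otherwise falls back to y. The id_string below is a made-up placeholder.
#   >>> combine_id_string_x_with_id_string_y(
#   ...     pd.Series({'id_string_x': float('nan'), 'id_string_y': '9606.ENSP_FAKE'}))
#   '9606.ENSP_FAKE'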
if __name__ == '__main__':
#
# [H]omo [S]apiens (9606) - [A]liases
#
print('Mapping HS')
# Query bioMart for Gene Name/Description
ds_HS = Dataset(name='hsapiens_gene_ensembl', host='http://www.ensembl.org')
df_HS_G = ds_HS.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype', 'description']).set_index('Gene stable ID')
rCSVFileCG = "../01-diff-gene-exp/results/HS/HS-DGE_Cyte_vs_Gonia.csv"
rCSVFileCT = "../01-diff-gene-exp/results/HS/HS-DGE_Tid_vs_Cyte.csv"
df_HS_CG = pd.read_csv(rCSVFileCG, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_HS_CG.index.name = 'id_gene'
df_HS_CG.index = df_HS_CG.index.map(lambda x: x.split('.')[0])
df_HS_CG.columns = [x + '_CyteGonia' for x in df_HS_CG.columns]
df_HS_CT = pd.read_csv(rCSVFileCT, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_HS_CT.columns = [x + '_TidCyte' for x in df_HS_CT.columns]
df_HS_CT.index.name = 'id_gene'
df_HS_CT.index = df_HS_CT.index.map(lambda x: x.split('.')[0])
# Map: id_gene <-> id_string
df_SA = open_undefined_last_column_files('../data/StringDB/9606/9606.protein.aliases.v11.0.txt.gz', skiprows=1, n_fixed_cols=2, names=["id_string", "alias", "source"])
# Parse String Data - Note some genes have multiple id_string, others have no match
df_SA = df_SA.loc[df_SA['alias'].isin(df_HS_CG.index.to_list() + df_HS_CT.index.to_list()), ["alias", "id_string"]].rename(columns={"alias": "id_gene"})
df_SAg = df_SA.groupby('id_gene').agg({'id_string': lambda x: x if len(x) == 1 else list(x)})
# Up
df_HS_CG['id_string'] = df_SAg['id_string']
df_HS_CG['Cyte_vs_Gonia'] = True
# Down
df_HS_CT['id_string'] = df_SAg['id_string']
df_HS_CT['Tid_vs_Cyte'] = True
# Merge Up/Down
df_HS = pd.merge(df_HS_CG, df_HS_CT, how='outer', left_index=True, right_index=True)
df_HS['id_string'] = df_HS.apply(combine_id_string_x_with_id_string_y, axis='columns')
df_HS['gene'] = df_HS_G['Gene name']
df_HS['biotype'] = df_HS_G['Gene type']
df_HS[['Cyte_vs_Gonia', 'Tid_vs_Cyte']] = df_HS[['Cyte_vs_Gonia', 'Tid_vs_Cyte']].fillna(False)
# Index Rows/Cols
maskcols = [
'id_string', 'gene', 'Cyte_vs_Gonia', 'Tid_vs_Cyte',
'logCPM_CyteGonia', 'logFC_CyteGonia', 'FDR_CyteGonia',
'logCPM_TidCyte', 'logFC_TidCyte', 'FDR_TidCyte',
'biotype'
]
df_HS = df_HS.loc[:, maskcols]
# To CSV
df_HS.to_csv('results/HS-DE_genes.csv.gz')
#
# !!Mitosis!! [H]omo [S]apiens (9606) - [A]liases
#
rCSVFileMP = "../01-diff-gene-exp/results/HS/HS-GE_Mitotic_vs_PreMitotic.csv"
rCSVFilePM = "../01-diff-gene-exp/results/HS/HS-GE_PostMitotic_vs_Mitotic.csv"
df_HS_MP = pd.read_csv(rCSVFileMP, index_col=0).loc[:, []]
df_HS_MP.index.name = 'id_gene'
df_HS_MP.index = df_HS_MP.index.map(lambda x: x.split('.')[0])
df_HS_MP.columns = [x + '_MitPre' for x in df_HS_MP.columns]
df_HS_PM = pd.read_csv(rCSVFilePM, index_col=0).loc[:, []]
df_HS_PM.columns = [x + '_PosMit' for x in df_HS_PM.columns]
df_HS_PM.index.name = 'id_gene'
df_HS_PM.index = df_HS_PM.index.map(lambda x: x.split('.')[0])
# Map: id_gene <-> id_string
df_SA = open_undefined_last_column_files('../data/StringDB/9606/9606.protein.aliases.v11.0.txt.gz', skiprows=1, n_fixed_cols=2, names=["id_string", "alias", "source"])
# Parse String Data - Note some genes have multiple id_string, others have no match
df_SA = df_SA.loc[df_SA['alias'].isin(df_HS_MP.index.to_list() + df_HS_PM.index.to_list()), ["alias", "id_string"]].rename(columns={"alias": "id_gene"})
df_SAg = df_SA.groupby('id_gene').agg({'id_string': lambda x: x if len(x) == 1 else list(x)})
# Up
df_HS_MP['id_string'] = df_SAg['id_string']
df_HS_MP['Mit_vs_Pre'] = True
# Down
df_HS_PM['id_string'] = df_SAg['id_string']
df_HS_PM['Pos_vs_Mit'] = True
# Merge Up/Down
df_HSmit = pd.merge(df_HS_MP, df_HS_PM, how='outer', left_index=True, right_index=True)
df_HSmit['id_string'] = df_HSmit.apply(combine_id_string_x_with_id_string_y, axis='columns')
df_HSmit['gene'] = df_HS_G['Gene name']
df_HSmit['biotype'] = df_HS_G['Gene type']
df_HSmit[['Mit_vs_Pre', 'Pos_vs_Mit']] = df_HSmit[['Mit_vs_Pre', 'Pos_vs_Mit']].fillna(False)
# Index Rows/Cols
maskcols = [
'id_string', 'gene', 'Mit_vs_Pre', 'Pos_vs_Mit',
'biotype'
]
df_HSmit = df_HSmit.loc[:, maskcols]
# To CSV
df_HSmit.to_csv('results/DE/HS-E_mitotic_genes.csv.gz')
#
# [M]us [M]usculus (10090) - [A]liases
#
print('Mapping MM')
# Query bioMart for Gene Name/Description
ds_MM = Dataset(name='mmusculus_gene_ensembl', host='http://www.ensembl.org')
df_MM_G = ds_MM.query(attributes=['ensembl_gene_id', 'external_gene_name', 'gene_biotype', 'description']).set_index('Gene stable ID')
rCSVFileCG = "../01-diff-gene-exp/results/MM/MM-DGE_Cyte_vs_Gonia.csv"
rCSVFileCT = "../01-diff-gene-exp/results/MM/MM-DGE_Tid_vs_Cyte.csv"
df_MM_CG = pd.read_csv(rCSVFileCG, index_col=0).loc[:, ['logFC', 'logCPM', 'FDR']]
df_MM_CG.index.name = 'id_gene'
df_MM_CG.index = df_MM_CG.index.map(lambda x: x.split('.')[0])
df_MM_CG.columns = [x + '_CyteGonia' for x in df_MM_CG.columns]
df_MM_CT = | pd.read_csv(rCSVFileCT, index_col=0) | pandas.read_csv |
from context import dero
import pandas as pd
from pandas.util.testing import assert_frame_equal
from pandas import Timestamp
from numpy import nan
import numpy
class DataFrameTest:
df = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_duplicate_row = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01),
(10516, 'a', '1/2/2000', 1.02),
(10516, 'a', '1/3/2000', 1.03),
(10516, 'a', '1/3/2000', 1.03), #this is a duplicated row
(10516, 'a', '1/4/2000', 1.04),
(10516, 'b', '1/1/2000', 1.05),
(10516, 'b', '1/2/2000', 1.06),
(10516, 'b', '1/3/2000', 1.07),
(10516, 'b', '1/4/2000', 1.08),
(10517, 'a', '1/1/2000', 1.09),
(10517, 'a', '1/2/2000', 1.10),
(10517, 'a', '1/3/2000', 1.11),
(10517, 'a', '1/4/2000', 1.12),
], columns = ['PERMNO','byvar','Date', 'RET'])
df_weight = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0),
(10516, 'a', '1/2/2000', 1.02, 1),
(10516, 'a', '1/3/2000', 1.03, 1),
(10516, 'a', '1/4/2000', 1.04, 0),
(10516, 'b', '1/1/2000', 1.05, 1),
(10516, 'b', '1/2/2000', 1.06, 1),
(10516, 'b', '1/3/2000', 1.07, 1),
(10516, 'b', '1/4/2000', 1.08, 1),
(10517, 'a', '1/1/2000', 1.09, 0),
(10517, 'a', '1/2/2000', 1.1, 0),
(10517, 'a', '1/3/2000', 1.11, 0),
(10517, 'a', '1/4/2000', 1.12, 1),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight'])
df_nan_byvar = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', 3),
('b', 4),
], columns = ['byvar', 'val'])
df_nan_byvar_and_val = pd.DataFrame(data = [
('a', 1),
(nan, 2),
('b', nan),
('b', 4),
], columns = ['byvar', 'val'])
single_ticker_df = pd.DataFrame(data = [
('a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['byvar', 'Date', 'TICKER'])
df_datetime = df.copy()
df_datetime['Date'] = pd.to_datetime(df_datetime['Date'])
df_datetime_no_ret = df_datetime.copy()
df_datetime_no_ret.drop('RET', axis=1, inplace=True)
df_gvkey_str = pd.DataFrame([
('001076','3/1/1995'),
('001076','4/1/1995'),
('001722','1/1/2012'),
('001722','7/1/2012'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str['Date'] = pd.to_datetime(df_gvkey_str['Date'])
df_gvkey_num = df_gvkey_str.copy()
df_gvkey_num['GVKEY'] = df_gvkey_num['GVKEY'].astype('float64')
df_gvkey_str2 = pd.DataFrame([
('001076','2/1/1995'),
('001076','3/2/1995'),
('001722','11/1/2011'),
('001722','10/1/2011'),
('001722', nan),
(nan ,'1/1/2012')
], columns=['GVKEY','Date'])
df_gvkey_str2['Date'] = pd.to_datetime(df_gvkey_str2['Date'])
df_fill_data = pd.DataFrame(
data=[
(4, 'c', nan, 'a'),
(1, 'd', 3, 'a'),
(10, 'e', 100, 'a'),
(2, nan, 6, 'b'),
(5, 'f', 8, 'b'),
(11, 'g', 150, 'b'),
],
columns=['y', 'x1', 'x2', 'group']
)
class TestCumulate(DataFrameTest):
expect_between_1_3 = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.1, 1.1),
(10517, 'a', '1/3/2000', 1.11, 1.2210000000000003),
(10517, 'a', '1/4/2000', 1.12, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'cum_RET'])
expect_first = pd.DataFrame([
(10516, 'a', '1/1/2000', 1.01, 1.01),
(10516, 'a', '1/2/2000', 1.02, 1.02),
(10516, 'a', '1/3/2000', 1.03, 1.0506),
(10516, 'a', '1/4/2000', 1.04, 1.092624),
(10516, 'b', '1/1/2000', 1.05, 1.05),
(10516, 'b', '1/2/2000', 1.06, 1.06),
(10516, 'b', '1/3/2000', 1.07, 1.1342),
(10516, 'b', '1/4/2000', 1.08, 1.224936),
(10517, 'a', '1/1/2000', 1.09, 1.09),
(10517, 'a', '1/2/2000', 1.10, 1.10),
(10517, 'a', '1/3/2000', 1.11, 1.221),
(10517, 'a', '1/4/2000', 1.12, 1.36752),
], columns = ['PERMNO','byvar','Date', 'RET', 'cum_RET'])
def test_method_between_1_3(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[1,3])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_between_m2_0(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
#Actually same result as [1,3]
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_shifted_index(self):
df = self.df.copy()
df.index = df.index + 10
cum_df = dero.pandas.cumulate(df, 'RET', 'between', periodvar='Date',
byvars=['PERMNO','byvar'], time=[-2,0])
assert_frame_equal(self.expect_between_1_3, cum_df, check_dtype=False)
def test_method_first(self):
cum_df = dero.pandas.cumulate(self.df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'])
assert_frame_equal(self.expect_first, cum_df, check_dtype=False)
def test_grossify(self):
df = self.df.copy() #don't overwrite original
df['RET'] -= 1 #ungrossify
expect_first_grossify = self.expect_first.copy()
expect_first_grossify['cum_RET'] -= 1
expect_first_grossify['RET'] -= 1
cum_df = dero.pandas.cumulate(df, 'RET', 'first', periodvar='Date',
byvars=['PERMNO','byvar'], grossify=True)
assert_frame_equal(expect_first_grossify, cum_df, check_dtype=False)
class TestGroupbyMerge(DataFrameTest):
def test_subset_max(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'max', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 1.04),
(10516, 'a', '1/2/2000', 1.02, 1.04),
(10516, 'a', '1/3/2000', 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.04, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.08),
(10516, 'b', '1/2/2000', 1.06, 1.08),
(10516, 'b', '1/3/2000', 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.08, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.12),
(10517, 'a', '1/2/2000', 1.10, 1.12),
(10517, 'a', '1/3/2000', 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.12, 1.12)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_max'])
assert_frame_equal(expect_df, out)
def test_subset_std(self):
byvars = ['PERMNO','byvar']
out = dero.pandas.groupby_merge(self.df, byvars, 'std', subset='RET')
expect_df = pd.DataFrame(
[(10516, 'a', '1/1/2000', 1.01, 0.012909944487358068),
(10516, 'a', '1/2/2000', 1.02, 0.012909944487358068),
(10516, 'a', '1/3/2000', 1.03, 0.012909944487358068),
(10516, 'a', '1/4/2000', 1.04, 0.012909944487358068),
(10516, 'b', '1/1/2000', 1.05, 0.012909944487358068),
(10516, 'b', '1/2/2000', 1.06, 0.012909944487358068),
(10516, 'b', '1/3/2000', 1.07, 0.012909944487358068),
(10516, 'b', '1/4/2000', 1.08, 0.012909944487358068),
(10517, 'a', '1/1/2000', 1.09, 0.012909944487358068),
(10517, 'a', '1/2/2000', 1.10, 0.012909944487358068),
(10517, 'a', '1/3/2000', 1.11, 0.012909944487358068),
(10517, 'a', '1/4/2000', 1.12, 0.012909944487358068)],
columns = ['PERMNO','byvar','Date', 'RET', 'RET_std'])
assert_frame_equal(expect_df, out)
def test_nan_byvar_transform(self):
expect_df = self.df_nan_byvar.copy()
expect_df['val_transform'] = expect_df['val']
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'transform', (lambda x: x))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_transform_numeric(self):
non_standard_index = self.df_nan_byvar_and_val.copy()
non_standard_index.index = [5,6,7,8]
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
expect_df.index = [5,6,7,8]
out = dero.pandas.groupby_merge(non_standard_index, 'byvar', 'transform', (lambda x: x + 1))
assert_frame_equal(expect_df, out)
def test_nan_byvar_and_nan_val_and_nonstandard_index_transform_numeric(self):
expect_df = self.df_nan_byvar_and_val.copy()
expect_df['val_transform'] = expect_df['val'] + 1
def test_nan_byvar_sum(self):
expect_df = pd.DataFrame(data = [
('a', 1, 1.0),
(nan, 2, nan),
('b', 3, 7.0),
('b', 4, 7.0),
], columns = ['byvar', 'val', 'val_sum'])
out = dero.pandas.groupby_merge(self.df_nan_byvar, 'byvar', 'sum')
assert_frame_equal(expect_df, out)
class TestLongToWide:
expect_df_with_colindex = pd.DataFrame(data = [
(10516, 'a', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar',
'RET1/1/2000', 'RET1/2/2000',
'RET1/3/2000', 'RET1/4/2000'])
expect_df_no_colindex = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/2/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/3/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'a', '1/4/2000', 1.01, 1.02, 1.03, 1.04),
(10516, 'b', '1/1/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/2/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/3/2000', 1.05, 1.06, 1.07, 1.08),
(10516, 'b', '1/4/2000', 1.05, 1.06, 1.07, 1.08),
(10517, 'a', '1/1/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/2/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/3/2000', 1.09, 1.1, 1.11, 1.12),
(10517, 'a', '1/4/2000', 1.09, 1.1, 1.11, 1.12),
], columns = ['PERMNO', 'byvar', 'Date', 'RET0',
'RET1', 'RET2', 'RET3'])
input_data = DataFrameTest()
ltw_no_dup_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_dup_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET', colindex='Date')
ltw_no_dup_no_colindex = dero.pandas.long_to_wide(input_data.df,
['PERMNO', 'byvar'], 'RET')
ltw_dup_no_colindex = dero.pandas.long_to_wide(input_data.df_duplicate_row,
['PERMNO', 'byvar'], 'RET')
df_list = [ltw_no_dup_colindex, ltw_dup_colindex,
ltw_no_dup_no_colindex, ltw_dup_no_colindex]
def test_no_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_no_dup_colindex)
def test_duplicates_with_colindex(self):
assert_frame_equal(self.expect_df_with_colindex, self.ltw_dup_colindex)
def test_no_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_no_dup_no_colindex)
def test_duplicates_no_colindex(self):
assert_frame_equal(self.expect_df_no_colindex, self.ltw_dup_no_colindex)
def test_no_extra_vars(self):
for df in self.df_list:
assert ('__idx__','__key__') not in df.columns
class TestPortfolioAverages:
input_data = DataFrameTest()
expect_avgs_no_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001),
(1, 'b', 1.0550000000000002),
(2, 'a', 1.1050000000000002),
(2, 'b', 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET'])
expect_avgs_wt = pd.DataFrame(data = [
(1, 'a', 1.0250000000000001, 1.025),
(1, 'b', 1.0550000000000002, 1.0550000000000002),
(2, 'a', 1.1050000000000002, 1.12),
(2, 'b', 1.0750000000000002, 1.0750000000000002),
], columns = ['portfolio', 'byvar', 'RET', 'RET_wavg'])
expect_ports = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.01, 0, 1),
(10516, 'a', '1/2/2000', 1.02, 1, 1),
(10516, 'a', '1/3/2000', 1.03, 1, 1),
(10516, 'a', '1/4/2000', 1.04, 0, 1),
(10516, 'b', '1/1/2000', 1.05, 1, 1),
(10516, 'b', '1/2/2000', 1.06, 1, 1),
(10516, 'b', '1/3/2000', 1.07, 1, 2),
(10516, 'b', '1/4/2000', 1.08, 1, 2),
(10517, 'a', '1/1/2000', 1.09, 0, 2),
(10517, 'a', '1/2/2000', 1.1, 0, 2),
(10517, 'a', '1/3/2000', 1.11, 0, 2),
(10517, 'a', '1/4/2000', 1.12, 1, 2),
], columns = ['PERMNO', 'byvar', 'Date', 'RET', 'weight', 'portfolio'])
avgs, ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar')
w_avgs, w_ports = dero.pandas.portfolio_averages(input_data.df_weight, 'RET', 'RET', ngroups=2,
byvars='byvar', wtvar='weight')
def test_simple_averages(self):
assert_frame_equal(self.expect_avgs_no_wt, self.avgs, check_dtype=False)
def test_weighted_averages(self):
assert_frame_equal(self.expect_avgs_wt, self.w_avgs, check_dtype=False)
def test_portfolio_construction(self):
print(self.ports)
assert_frame_equal(self.expect_ports, self.ports, check_dtype=False)
assert_frame_equal(self.expect_ports, self.w_ports, check_dtype=False)
class TestWinsorize(DataFrameTest):
def test_winsor_40_subset_byvars(self):
expect_df = pd.DataFrame(data = [
(10516, 'a', '1/1/2000', 1.022624),
(10516, 'a', '1/2/2000', 1.022624),
(10516, 'a', '1/3/2000', 1.02672),
(10516, 'a', '1/4/2000', 1.02672),
(10516, 'b', '1/1/2000', 1.062624),
(10516, 'b', '1/2/2000', 1.062624),
(10516, 'b', '1/3/2000', 1.06672),
(10516, 'b', '1/4/2000', 1.06672),
(10517, 'a', '1/1/2000', 1.102624),
(10517, 'a', '1/2/2000', 1.102624),
(10517, 'a', '1/3/2000', 1.10672),
(10517, 'a', '1/4/2000', 1.10672),
], columns = ['PERMNO', 'byvar', 'Date', 'RET'])
wins = dero.pandas.winsorize(self.df, .4, subset='RET', byvars=['PERMNO','byvar'])
assert_frame_equal(expect_df, wins, check_less_precise=True)
class TestRegBy(DataFrameTest):
def create_indf(self):
indf = self.df_weight.copy()
indf['key'] = indf['PERMNO'].astype(str) + '_' + indf['byvar']
return indf
def test_regby_nocons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.48774684748988806, '10516_a'),
(0.9388636664168903, '10516_b'),
(0.22929206076239614, '10517_a'),
], columns = ['coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key', cons=False)
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons(self):
indf = self.create_indf()
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(-32.89999999999997, 29.999999999999982, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
def test_regby_cons_low_obs(self):
indf = self.create_indf().loc[:8,:] #makes it so that one byvar only has one obs
expect_df = pd.DataFrame(data = [
(0.49999999999999645, 5.329070518200751e-15, '10516_a'),
(0.9999999999999893, 1.0658141036401503e-14, '10516_b'),
(nan, nan, '10517_a'),
], columns = ['const', 'coef_RET', 'key'])
rb = dero.pandas.reg_by(indf, 'weight', 'RET', 'key')
print('Reg by: ', rb)
assert_frame_equal(expect_df, rb)
class TestExpandMonths(DataFrameTest):
def test_expand_months_tradedays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-19 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-20 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-21 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-24 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-25 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-26 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-27 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-28 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-31 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
], columns = ['Daily Date', 'byvar', 'Date', 'TICKER'])
em = dero.pandas.expand_months(self.single_ticker_df)
assert_frame_equal(expect_df.sort_index(axis=1), em.sort_index(axis=1))
def test_expand_months_calendardays(self):
expect_df = pd.DataFrame(data = [
(Timestamp('2000-01-01 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-02 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-03 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-04 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-05 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-06 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-07 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-08 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-09 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-10 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-11 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-12 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-13 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-14 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-15 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-16 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-17 00:00:00'), 'a', Timestamp('2000-01-01 00:00:00'), 'ADM'),
(Timestamp('2000-01-18 00:00:00'), 'a', | Timestamp('2000-01-01 00:00:00') | pandas.Timestamp |
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import os, sys
import pandas as pd
import numpy as np
from glob import glob
# In[2]:
days = sorted(
glob('./huabei/wanlong/*'),
key = lambda x: int( os.path.basename(x) )
)
# In[3]:
for day in days:
print (os.path.basename(day) )
files = sorted(
glob( os.path.join(day,'*.pkl') ),
key = lambda x: int( os.path.basename(x)[:-4] )
)
sum_df = pd.DataFrame()
for file in files:
df = pd.read_pickle(file)
sum_df = sum_df.append(df, ignore_index=True)
sum_df.astype(
{
'x1' : np.int16,
'x2' : np.int16,
'y1' : np.int16,
'y2' : np.int16,
}
).to_pickle( os.path.join('bulk',os.path.basename(day)+'.pkl') )
# break
# In[4]:
days = sorted( [os.path.basename(i) for i in glob('./huabei/wanlong/*')] )
for idx,day in enumerate(days):
data_df = | pd.read_pickle('./bulk/'+day+'.pkl') | pandas.read_pickle |
# -*- coding: UTF-8 -*-
"""
This script demonstrates the impact of endogeneity on a regression model
"""
# Ensure the script is compatible with Python 3
from __future__ import print_function
import sys
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import statsmodels.api as sm
from statsmodels.sandbox.regression.gmm import IV2SLS
import scipy.stats.stats as scss
def generateLinearData(n):
"""
Generate data for a linear regression model with an endogenous variable
"""
data = | pd.DataFrame() | pandas.DataFrame |
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/dev-05-price-moe.ipynb (unless otherwise specified).
__all__ = ['construct_dispatchable_lims_df', 'construct_pred_mask_df', 'AxTransformer', 'set_ticks', 'set_date_ticks',
'construct_df_pred', 'construct_pred_ts', 'calc_error_metrics', 'get_model_pred_ts', 'weighted_mean_s']
# Cell
import json
import pandas as pd
import numpy as np
import pickle
import scipy
from sklearn import linear_model
from sklearn.metrics import r2_score
from collections.abc import Iterable
from tqdm import tqdm
from moepy import lowess, eda
from .surface import PicklableFunction
# Cell
def construct_dispatchable_lims_df(s_dispatchable, rolling_w=3, daily_quantiles=[0.001, 0.999]):
"""Identifies the rolling limits to be used in masking"""
df_dispatchable_lims = (s_dispatchable
.resample('1d')
.quantile(daily_quantiles)
.unstack()
.rolling(rolling_w*7)
.mean()
.bfill()
.ffill()
.iloc[:-1, :]
)
df_dispatchable_lims.index = pd.to_datetime(df_dispatchable_lims.index.strftime('%Y-%m-%d'))
return df_dispatchable_lims
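# Hypothetical usage sketch (not part of moepy itself): `s_dispatchable` is
# assumed to be a half-hourly pd.Series of dispatchable generation indexed by
# timestamp; the helper returns one row per day with the rolling-smoothed
# lower/upper daily quantiles that are later used to mask predictions.
#   >>> s = pd.Series(np.random.rand(48 * 90),
#   ...               index=pd.date_range('2020-01-01', periods=48 * 90, freq='30min'))
#   >>> construct_dispatchable_lims_df(s).head()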
def construct_pred_mask_df(df_pred, df_dispatchable_lims):
"""Constructs a DataFrame mask for the prediction"""
df_pred = df_pred[df_dispatchable_lims.index]
df_pred_mask = pd.DataFrame(dict(zip(df_pred.columns, [df_pred.index]*df_pred.shape[1])), index=df_pred.index)
df_pred_mask = (df_pred_mask > df_dispatchable_lims.iloc[:, 0].values) & (df_pred_mask < df_dispatchable_lims.iloc[:, 1].values)
df_pred.columns = pd.to_datetime(df_pred.columns)
df_pred_mask.columns = | pd.to_datetime(df_pred_mask.columns) | pandas.to_datetime |
# coding: utf-8
# CS FutureMobility Tool
# See full license in LICENSE.txt.
import numpy as np
import pandas as pd
#import openmatrix as omx
from IPython.display import display
from openpyxl import load_workbook,Workbook
from time import strftime
import os.path
import mode_choice.model_defs as md
import mode_choice.matrix_utils as mtx
import config
''' Utilities to summarize the outputs of Mode Choice '''
def display_mode_share(mc_obj):
'''
This displays a mode share summary by market segment (with / without vehicle, peak / off-peak) in the IPython notebook.
:param mc_obj: mode choice module object as defined in the IPython notebook
'''
# display mode share tables
avg_trips_by_mode = pd.DataFrame(None)
for purpose in ['HBW','HBO', 'NHB', 'HBSc1', 'HBSc2', 'HBSc3']:
avg_trips_by_mode = avg_trips_by_mode.add(pd.DataFrame({pv:{mode:(mc_obj.table_container.get_table(purpose)[pv][mode].sum()) for mode in mc_obj.table_container.get_table(purpose)[pv]} for pv in ['0_PK','1_PK','0_OP','1_OP']}).T,
fill_value = 0)
avg_mode_share = avg_trips_by_mode.divide(avg_trips_by_mode.sum(1),axis = 0)
display(avg_mode_share.style.format("{:.2%}"))
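# Typical call pattern (illustrative; `mc_obj` is the mode choice object built
# in the driving notebook and is not defined in this module):
#   display_mode_share(mc_obj)                  # render shares by segment
#   write_mode_share_to_excel(mc_obj, 'all')    # one Excel sheet per purpose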
def write_boston_neighbortown_mode_share_to_excel(mc_obj):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
Applies only to trips to/from Boston
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
out_excel_fn = mc_obj.config.out_path + "mode_share_bosNB_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
#study area zones might not start at zone 0 and could have discontinous TAZ IDs
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOS_AND_NEI'])
trip_table_d = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOS_AND_NEI'], D_slice = md.taz['BOSTON'])
trip_table_b = mtx.OD_slice(trip_table[pv][mode], O_slice = md.taz['BOSTON'], D_slice = md.taz['BOSTON'])
trip_table_bos = trip_table_o + trip_table_d - trip_table_b
mode_share.loc[mode,pv] = trip_table_bos.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_study_area_mode_share_to_excel(mc_obj, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
Applies only to trips to/from study area
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "mode_share_study_area_{0}.xlsx".format(strftime("%Y%m%d"))
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
mode_share = pd.DataFrame(columns = md.peak_veh)
trip_table = mc_obj.table_container.get_table(purp)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
trip_table_o = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area)
trip_table_d = mtx.OD_slice(trip_table[pv][mode], D_slice = md.study_area)
trip_table_ii = mtx.OD_slice(trip_table[pv][mode], O_slice = md.study_area, D_slice = md.study_area)
trip_table_sa = trip_table_o + trip_table_d - trip_table_ii
mode_share.loc[mode,pv] = trip_table_sa.sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
def write_mode_share_to_excel(mc_obj,purpose, out_excel_fn = None):
'''
Writes mode share summary by purpose and market segment to an Excel workbook.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param purpose: can be a single purpose or 'all', in which case the Excel workbook has six sheets, one for each purpose.
:param out_excel_fn: output Excel filename, by default in the output path defined in config.py
'''
if out_excel_fn is None:
out_excel_fn = mc_obj.config.out_path + "MC_mode_share_{0}_{1}.xlsx".format(purpose, strftime("%Y%m%d"))
if purpose == 'all':
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
for purp in md.purposes:
trip_table = mc_obj.table_container.get_table(purp)
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in trip_table[pv].keys():
mode_share.loc[mode,pv] = trip_table[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purp in book.sheetnames: # if sheetname exists, delete
book.remove(book[purp])
writer.save()
mode_share.to_excel(writer, sheet_name = purp)
writer.save()
elif purpose in md.purposes:
# check if file exists.
if os.path.isfile(out_excel_fn):
book = load_workbook(out_excel_fn)
else:
book = Workbook()
book.save(out_excel_fn)
writer = pd.ExcelWriter(out_excel_fn,engine = 'openpyxl')
writer.book = book
mode_share = pd.DataFrame(columns = md.peak_veh)
for pv in md.peak_veh:
for mode in mc_obj.trips_by_mode[pv].keys():
mode_share.loc[mode,pv] = mc_obj.trips_by_mode[pv][mode].sum()
mode_share['Total'] = mode_share.sum(1)
mode_share['Share'] = mode_share['Total'] / mode_share['Total'].sum()
if purpose in book.sheetnames: # if sheetname exists, delete
book.remove(book[purpose])
writer.save()
mode_share.to_excel(writer, sheet_name = purpose)
writer.save()
def __mt_prod_attr_nhood(mc_obj, trip_table, skim): # miles traveled. For VMT and PMT, by neighborhood
# sum prodct of trip_table - skims
mt_total = trip_table * skim['Length (Skim)']
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1)/2, columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0) / 2, columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
def __trip_prod_attr_nhood(mc_obj, trip_table):
mt_total = trip_table
# calculate marginals
prod = pd.DataFrame(np.sum(mt_total,axis = 1), columns = ['Production'])
attr = pd.DataFrame(np.sum(mt_total,axis = 0), columns = ['Attraction'])
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
mt_taz = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],prod,attr],axis = 1,join = 'inner')
mt_taz.index.names=['Boston Neighborhood']
return mt_taz.groupby(['BOSTON_NB']).sum()[['Production','Attraction']].reset_index()
def sm_vmt_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
auto_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def vmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes VMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'vmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports VMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
vmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
auto_trip_table = sum([
mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] / md.AO_dict[mode]
for mode in ['DA','SR2','SR3+','SM_RA','SM_SH'] if mode in drive_modes])
vmt_table = __mt_prod_attr_nhood(mc_obj,auto_trip_table,skim_dict[peak])
vmt_table['peak'] = peak
vmt_table['veh_own'] = veh_own
vmt_table['purpose'] = purpose
vmt_master_table = vmt_master_table.append(vmt_table, sort = True)
if by == None:
vmt_summary = vmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
vmt_summary = pd.concat([
vmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
vmt_summary = pd.concat([
vmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
vmt_summary = pd.concat([
vmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in vmt_master_table.purpose.unique()],axis = 1, keys= vmt_master_table.purpose.unique())
vmt_summary.to_csv(out_fn)
def pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def act_pmt_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
Summarizes PMT production and attraction by the 26 Boston neighborhoods for active modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'act_pmt_by_neighborhood_by_{by}.csv'
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports PMT by neighborhood, peak / vehicle ownership, purpose.')
return
else:
pmt_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in ['Walk','Bike'] if mode in drive_modes])
pmt_table = __mt_prod_attr_nhood(mc_obj,person_trip_table,skim_dict[peak])
pmt_table['peak'] = peak
pmt_table['veh_own'] = veh_own
pmt_table['purpose'] = purpose
pmt_master_table = pmt_master_table.append(pmt_table, sort = True)
if by == None:
pmt_summary = pmt_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
pmt_summary = pd.concat([
pmt_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
pmt_summary = pd.concat([
pmt_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
pmt_summary = pd.concat([
pmt_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in pmt_master_table.purpose.unique()],axis = 1, keys= pmt_master_table.purpose.unique())
pmt_summary.to_csv(out_fn)
def sm_trips_by_neighborhood(mc_obj, out_fn = None, by = None, sm_mode = 'SM_RA'):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods for Shared Mobility Modes.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
:param sm_mode: Smart Mobility Mode name
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + sm_mode + f'_trips_by_neighborhood_by_{by}.csv'
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
person_trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def trips_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes trip production and attraction by the 26 Boston neighborhoods.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary; if None specified, only aggregate production and attraction will be provided.
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'trips_by_neighborhood_by_{by}.csv'
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports Trips by neighborhood, peak / vehicle ownership, purpose.')
return
else:
trp_master_table = pd.DataFrame(columns = ['Production','Attraction','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
drive_modes = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'].keys()
person_trip_table = sum([mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode] for mode in md.modes if mode in drive_modes])
trp_table = __trip_prod_attr_nhood(mc_obj,person_trip_table)
trp_table['peak'] = peak
trp_table['veh_own'] = veh_own
trp_table['purpose'] = purpose
trp_master_table = trp_master_table.append(trp_table, sort = True)
if by == None:
trp_summary = trp_master_table.groupby('BOSTON_NB').sum()
elif by == 'peak':
trp_summary = pd.concat([
trp_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak] for peak in ['PK','OP']], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
trp_summary = pd.concat([
trp_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own] for veh_own in ['0','1']], axis = 1, keys = ['No car', 'With car']
)
elif by == 'purpose':
trp_summary = pd.concat([
trp_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose] for purpose in trp_master_table.purpose.unique()],axis = 1, keys= trp_master_table.purpose.unique())
trp_summary.to_csv(out_fn)
def mode_share_by_neighborhood(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 26 Boston neighborhoods, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_neighborhood_by_{by}.csv'
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports mode share by neighborhood, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
trips = pd.concat([towns[[md.taz_ID_field,'BOSTON_NB']],share_table],axis = 1,join = 'inner').groupby(['BOSTON_NB']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('BOSTON_NB').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].divide(
share_master_table.groupby(['peak','BOSTON_NB']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','BOSTON_NB']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','BOSTON_NB']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
# Seaport method
def mode_share_by_subarea(mc_obj, out_fn = None, by = None):
'''
    Summarizes mode share as the average of trips to/from the 7 Seaport sub-areas, in four categories - drive, non-motorized, transit and smart mobility.
:param mc_obj: mode choice module object as defined in the IPython notebook
:param out_fn: output csv filename; if None specified, in the output path defined in config.py
:param by: grouping used for the summary
'''
if out_fn is None and by is None:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea.csv'
elif out_fn is None and by:
out_fn = mc_obj.config.out_path + f'mode_share_by_subarea_by_{by}.csv'
    if by not in [None, 'peak', 'veh_own', 'purpose']:
print('Only supports mode share by subarea, peak / vehicle ownership, purpose.')
return
else:
share_master_table = pd.DataFrame(columns = ['drive','non-motorized','transit','peak','veh_own','purpose'])
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
share_table = pd.DataFrame(index = range(0,md.max_zone),columns = ['drive','non-motorized','transit','smart mobility']).fillna(0)
for mode in mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}']:
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][mode]
category = md.mode_categories[mode]
share_table[category] += (trip_table.sum(axis = 1)+trip_table.sum(axis = 0))/2
towns = mc_obj.taz.sort_values(md.taz_ID_field).iloc[0:md.max_zone]
towns['REPORT_AREA'] = towns['REPORT_AREA'][towns['REPORT_AREA'].isin(['South Station', 'Seaport Blvd', 'Design Center',
'Southeast Seaport', 'BCEC', 'Fort Point', 'Broadway'])]
trips = pd.concat([towns[[md.taz_ID_field,'REPORT_AREA']],share_table],axis = 1,join = 'inner').groupby(['REPORT_AREA']).sum().drop([md.taz_ID_field],axis = 1)
trips['peak'] = peak
trips['veh_own'] = veh_own
trips['purpose'] = purpose
share_master_table = share_master_table.append(trips.reset_index(), sort = True)
if by == None:
trip_summary = share_master_table.groupby('REPORT_AREA').sum()
share_summary = trip_summary.divide(trip_summary.sum(axis = 1),axis = 0)
elif by == 'peak':
share_summary = pd.concat([
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].divide(
share_master_table.groupby(['peak','REPORT_AREA']).sum().loc[peak].sum(axis=1),axis = 0)
for peak in ['PK','OP']
], axis = 1, keys = ['PK','OP'])
elif by == 'veh_own':
share_summary = pd.concat([
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].divide(
share_master_table.groupby(['veh_own','REPORT_AREA']).sum().loc[veh_own].sum(axis=1),axis = 0)
for veh_own in ['0','1']
], axis = 1, keys = ['No car', 'With car'])
elif by == 'purpose':
share_summary = pd.concat([
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].divide(
share_master_table.groupby(['purpose','REPORT_AREA']).sum().loc[purpose].sum(axis=1),axis = 0)
for purpose in share_master_table.purpose.unique()
],axis = 1, keys= share_master_table.purpose.unique())
share_summary.to_csv(out_fn)
def __sm_compute_summary_by_subregion(mc_obj,metric = 'VMT',subregion = 'neighboring', sm_mode='SM_RA'):
''' Computing function used by write_summary_by_subregion(), does not produce outputs'''
if metric.lower() not in ('vmt','pmt','mode share','trip', 'pmt_act'):
print('Only supports trip, VMT, PMT and mode share calculations.')
return
if subregion.lower() not in ('boston','neighboring','i93','i495','region'):
print('Only supports within boston, "neighboring" for towns neighboring Boston, I93, I495 or Region.')
return
subregion_dict = {'boston':'BOSTON','neighboring':'BOS_AND_NEI','i93':'in_i95i93','i495':'in_i495'}
if metric.lower() == 'vmt':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
vmt_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode] / md.AO_dict[sm_mode]
vmt_table += trip_table * skim_dict[peak]['Length (Skim)']
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'], D_slice = md.taz[field]== True)
boston_d_auto_vmt = mtx.OD_slice(vmt_table,md.taz[field]== True,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_auto_vmt = vmt_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_auto_vmt = mtx.OD_slice(vmt_table,O_slice = md.taz['BOSTON'])
boston_d_auto_vmt = mtx.OD_slice(vmt_table,D_slice = md.taz['BOSTON'])
#boston_o_auto_vmt = vmt_table[md.taz['BOSTON'],:]
#boston_d_auto_vmt = vmt_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_vmt_daily_o = pd.DataFrame(np.sum(boston_o_auto_vmt,axis=1)/2 ,columns=["VMT"])
zone_vmt_daily_d = pd.DataFrame(np.sum(boston_d_auto_vmt,axis=0)/2 ,columns=["VMT"])
town_vmt_o=pd.concat([town_definition,zone_vmt_daily_o],axis=1,join='inner')
town_vmt_d=pd.concat([town_definition,zone_vmt_daily_d],axis=1,join='inner')
vmt_sum_o = town_vmt_o[town_vmt_o['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
vmt_sum_d = town_vmt_d[town_vmt_d['TOWN']=='BOSTON,MA'].groupby(['TOWN']).sum()['VMT']
subregion_vmt = (vmt_sum_o + vmt_sum_d).values[0]
return subregion_vmt
elif metric.lower() == 'trip':
skim_dict = {'PK': mc_obj.drive_skim_PK,'OP':mc_obj.drive_skim_OP}
tripsum_table = np.zeros((md.max_zone,md.max_zone))
for purpose in md.purposes:
for peak in ['PK','OP']:
for veh_own in ['0','1']:
if mc_obj.table_container.get_table(purpose):
trip_table = mc_obj.table_container.get_table(purpose)[f'{veh_own}_{peak}'][sm_mode]
tripsum_table += trip_table
if subregion.lower() in subregion_dict:
field = subregion_dict[subregion.lower()]
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'],D_slice = md.taz[field]== True)
boston_d_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz[field]== True, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:][:, md.taz[field]== True]
#boston_d_trip = tripsum_table[md.taz[field]== True,:][:,md.taz['BOSTON']]
town_definition = md.taz[md.taz[field]== True]
elif subregion.lower() == 'region':
boston_o_trip = mtx.OD_slice(tripsum_table, O_slice = md.taz['BOSTON'])
boston_d_trip = mtx.OD_slice(tripsum_table, D_slice = md.taz['BOSTON'])
#boston_o_trip = tripsum_table[md.taz['BOSTON'],:]
#boston_d_trip = tripsum_table[:][:,md.taz['BOSTON']]
town_definition = md.taz
zone_daily_o = pd.DataFrame(np.sum(boston_o_trip,axis=1) ,columns=["trips"])
zone_daily_d = pd.DataFrame(np.sum(boston_d_trip,axis=0) ,columns=["trips"])
town_o= | pd.concat([town_definition,zone_daily_o],axis=1,join='inner') | pandas.concat |
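
# --- Illustrative usage sketch (assumption: not part of the original module) ---
# The summary helpers above share one calling convention: an `mc_obj` built by the
# mode choice run in the notebook, plus an optional `by` grouping
# ('peak', 'veh_own' or 'purpose'). Typical calls:
#
# display_mode_share(mc_obj)                           # on-screen market-segment summary
# write_mode_share_to_excel(mc_obj, purpose='all')     # one Excel sheet per purpose
# vmt_by_neighborhood(mc_obj, by='purpose')            # csv of VMT grouped by trip purpose
# trips_by_neighborhood(mc_obj, by='veh_own')          # csv of trips by vehicle ownership
# mode_share_by_neighborhood(mc_obj, by='peak')        # csv of mode shares by peak/off-peak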
import os
import cv2
import glob
import csv
import argparse
import pandas as pd
from tqdm import tqdm
import linecache, shutil
from joblib import Parallel, delayed
parser = argparse.ArgumentParser(description='cropping by class')
parser.add_argument('-cn', '--change_name', help='change "OO_checked" to "OO_cropped" (y/n) ', default = 'n')
args = parser.parse_args()
folder = "pocari-cm.mp4" # must be "OO_output"
PWD = os.getcwd() + "/"
path = PWD + folder + "_output"
files2=glob.glob(path + "/*")
csvpath_read = os.path.join(path, folder) + ".csv"
global total, fcnt, width, height, x_gap, y_gap, thorn, info
total = 0
fcnt = 1
## change setting here ##
width = 256 # cropped image's width
height = 256 # cropped image's height
x_gap = 30 # gap between cropped images' left side
y_gap = 30 # gap between cropped images' upper side
#########################
thorn = 0 #always be 0
info = 'width={} \nheight={} \nx_gap={} \ny_gap={} \nthorn={}'.format(width, height, x_gap, y_gap, thorn)
print('--directories to be cropped--')
for cname in files2:
if cname.endswith("_checked"):
print(os.path.basename(cname))
total+=1
print('-----------------------------')
print('-----Total:{} directories-----'.format(total))
def cropping(cname):
global total, fcnt, width, height, x_gap, y_gap, thorn, info
break_check2 = 0
if not cname.endswith("_checked"):
break_check2=1
if break_check2 == 0:
if args.change_name == 'n':
shutil.copytree(cname, path + "/" + os.path.basename(cname) + "_" + str(width) + "_" + str(height) + "_" + str(x_gap) + "_" + str(y_gap) + "_" + str(thorn))
cname = path + "/" + os.path.basename(cname) + "_" + str(width) + "_" + str(height) + "_" + str(x_gap) + "_" + str(y_gap) + "_" + str(thorn)
csvimgcnt=0
row = 0
start_of_newimg_index = 0
csvpath_write = cname + "/" + os.path.basename(cname[:-8]) + "_cropped.csv"
cw = pd.DataFrame(columns=['image', 'num of ppl'])
cw.to_csv(csvpath_write)
df = pd.read_csv(csvpath_read, index_col=0)
splited = cname.split("_")
namecheck = splited[0] + "_" + splited[1] + "_" + splited[2]
for pt in range(len(df)): # http://www.kisse-logs.com/2017/04/11/python-dataframe-drop/
if df.loc[pt, 'image'] == namecheck:
csvimgcnt+=1
row=pt
start_of_newimg_index = row - csvimgcnt + 1
basename = os.path.basename(cname)
        #crpimg = cv2.imread(cname[:-16] + "/" + basename[:-28] + "_annotated.jpg") # this is for checking; need to change the basename index every time. !!! DO NOT DELETE IT !!!
crpimg = cv2.imread(cname + "/LAST/0.jpg")
pbar = tqdm(total=int(((crpimg.shape[0]-height-100)/y_gap)*((crpimg.shape[1]-width-100)/x_gap)))
pbar.set_description("{}: {}".format(fcnt, os.path.basename(cname)))
fcnt+=1
with open(cname + "/crop_info.txt", mode='w') as f:
f.write(info)
for i in range(50, crpimg.shape[0]-height-50, y_gap): # y
for j in range(50, crpimg.shape[1]-width-50, x_gap): # x
x_list = []
y_list = []
cropped=crpimg[i:i+height, j:j+width]
df = | pd.read_csv(csvpath_read, index_col=0) | pandas.read_csv |
from __future__ import print_function
import pandas as pd
import os
import logging
import argparse
'''
This file reads in data related E. coli levels
in Chicago beaches. It is based on the files
analysis.R and split_sheets.R, and is written
such that the dataframe loaded here will match
the R dataframe code exactly.
'''
# This is an adaptation of previous read_data.py so that it runs on Python3
# Some variable names changed. Notably, Client.ID is now Beach
# Added day of week and month variables
# Also adds columns to dataframe:
# YesterdayEcoli : prior days reading
# DayBeforeYesterdayEcoli : two days prior reading
# actual_elevated : where Escherichia_coli >=235
# predicted_elevated : where Drek_Prediction >=235
#
# TODO: verbose
# TODO: use multi-level index on date/beach ?
# TODO: standardize on inplace=True or not inplace
# TODO: how much consistency do we want between python columns
# and the R columns?
# TODO: create better docstrings
# TODO: remove print statements and the import
# TODO: loyola/leone the same?
# TODO: repeats on 2015-06-16 ?
# and some of 2012?
# Just check for these everywhere, why is it happening?
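
# A minimal sketch (not the original implementation) of how the derived columns
# described above could be built, assuming one row per Beach per Date with
# 'Escherichia_coli' and 'Drek_Prediction' columns. Note that shift(1) is only
# the prior *day* if each beach has exactly one contiguous row per day.
def add_derived_columns_example(df):
    df = df.sort_values(['Beach', 'Date'])
    by_beach = df.groupby('Beach')['Escherichia_coli']
    df['YesterdayEcoli'] = by_beach.shift(1)
    df['DayBeforeYesterdayEcoli'] = by_beach.shift(2)
    df['actual_elevated'] = (df['Escherichia_coli'] >= 235).astype(int)
    df['predicted_elevated'] = (df['Drek_Prediction'] >= 235).astype(int)
    return df
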
def split_sheets(file_name, year, verbose=False):
'''
Reads in all sheets of an excel workbook, concatenating
all of the information into a single dataframe.
The excel files were unfortunately structured such that
each day had its own sheet.
'''
xls = pd.ExcelFile(file_name)
dfs = []
standardized_col_names = [
'Date', 'Laboratory_ID', 'Beach', 'Reading1',
'Reading2', 'Escherichia_coli', 'Units', 'Sample_Collection_Time'
]
for i, sheet_name in enumerate(xls.sheet_names):
if not xls.book.sheet_by_name(sheet_name).nrows:
# Older versions of ExcelFile.parse threw an error if the sheet
# was empty, explicitly check for this condition.
logging.debug('sheet "{0}" from {1} is empty'.format(sheet_name,
year))
continue
df = xls.parse(sheet_name)
if i == 0 and len(df.columns) > 30:
# This is the master/summary sheet
logging.debug('ignoring sheet "{0}" from {1}'.format(sheet_name,
year))
continue
if df.index.dtype == 'object':
# If the first column does not have a label, then the excel
# parsing engine will helpfully use the first column as
# the index. This is *usually* helpful, but there are two
# days when the first column is missing the typical label
# of 'Laboratory ID'. In this case, peel that index off
# and set its name.
msg = '1st column in sheet "{0}" from {1} is missing title'.format(
sheet_name, year)
logging.debug(msg)
df.reset_index(inplace=True)
df.columns = ['Laboratory ID'] + df.columns.tolist()[1:]
# Insert name of sheet as first column, the sheet name is the date
df.insert(0, u'Date', sheet_name)
for c in df.columns.tolist():
if 'Reading' in c:
# There are about 10 days that have >2 readings for some reason
if int(c[8:]) > 2:
logging.info('sheet "{0}" from {1} has >2 readings'.format(
sheet_name, year)
)
df.drop(c, 1, inplace=True)
# Only take the first 8 columns, some sheets erroneously have >8 cols
        df = df.iloc[:, 0:8]
# Standardize the column names
df.columns = standardized_col_names
dfs.append(df)
df = | pd.concat(dfs) | pandas.concat |
# Copyright 2020, Sophos Limited. All rights reserved.
#
# 'Sophos' and 'Sophos Anti-Virus' are registered trademarks of
# Sophos Limited and Sophos Group. All other product and company
# names mentioned are trademarks or registered trademarks of their
# respective owners.
import torch
import baker
from nets import PENetwork
from generators import get_generator
import tqdm
import os
from config import device
import config
from dataset import Dataset
import pickle
from logzero import logger
from copy import deepcopy
import pandas as pd
import numpy as np
all_tags = Dataset.tags
def detach_and_copy_array(array):
if isinstance(array, torch.Tensor):
return deepcopy(array.cpu().detach().numpy()).ravel()
elif isinstance(array, np.ndarray):
return deepcopy(array).ravel()
else:
raise ValueError("Got array of unknown type {}".format(type(array)))
def normalize_results(labels_dict, results_dict, use_malware=True, use_count=True, use_tags=True):
"""
Take a set of results dicts and break them out into
a single dict of 1d arrays with appropriate column names
that pandas can convert to a DataFrame.
"""
# we do a lot of deepcopy stuff here to avoid a FD "leak" in the dataset generator
# see here: https://github.com/pytorch/pytorch/issues/973#issuecomment-459398189
rv = {}
if use_malware:
rv['label_malware'] = detach_and_copy_array(labels_dict['malware'])
rv['pred_malware'] = detach_and_copy_array(results_dict['malware'])
if use_count:
rv['label_count'] = detach_and_copy_array(labels_dict['count'])
rv['pred_count'] = detach_and_copy_array(results_dict['count'])
if use_tags:
for column, tag in enumerate(all_tags):
rv[f'label_{tag}_tag'] = detach_and_copy_array(labels_dict['tags'][:, column])
            rv[f'pred_{tag}_tag'] = detach_and_copy_array(results_dict['tags'][:, column])
return rv
@baker.command
def evaluate_network(results_dir, checkpoint_file,
db_path=config.db_path,
evaluate_malware=True,
evaluate_count=True,
evaluate_tags=True,
remove_missing_features='scan'):
"""
Take a trained feedforward neural network model and output evaluation results to a csv in the specified location.
:param results_dir: The directory to which to write the 'results.csv' file; WARNING -- this will overwrite any
existing results in that location
:param checkpoint_file: The checkpoint file containing the weights to evaluate
:param db_path: the path to the directory containing the meta.db file; defaults to the value in config.py
:param evaluate_malware: defaults to True; whether or not to record malware labels and predictions
:param evaluate_count: defaults to True; whether or not to record count labels and predictions
:param evaluate_tags: defaults to True; whether or not to record individual tag labels and predictions
:param remove_missing_features: See help for remove_missing_features in train.py / train_network
"""
os.system('mkdir -p {}'.format(results_dir))
model = PENetwork(use_malware=True, use_counts=True, use_tags=True, n_tags=len(Dataset.tags),
feature_dimension=2381)
model.load_state_dict(torch.load(checkpoint_file))
model.to(device)
generator = get_generator(mode='test', path=db_path, use_malicious_labels=evaluate_malware,
use_count_labels=evaluate_count,
use_tag_labels=evaluate_tags, return_shas=True,
remove_missing_features=remove_missing_features)
logger.info('...running network evaluation')
f = open(os.path.join(results_dir,'results.csv'),'w')
first_batch = True
for shas, features, labels in tqdm.tqdm(generator):
features = features.to(device)
predictions = model(features)
results = normalize_results(labels, predictions)
pd.DataFrame(results, index=shas).to_csv(f, header=first_batch)
first_batch=False
f.close()
print('...done')
import lightgbm as lgb
@baker.command
def evaluate_lgb(lightgbm_model_file,
results_dir,
db_path=config.db_path,
remove_missing_features='scan'
):
"""
Take a trained lightGBM model and perform an evaluation on it. Results will be saved to
results.csv in the path specified in results_dir
:param lightgbm_model_file: Full path to the trained lightGBM model
:param results_dir: The directory to which to write the 'results.csv' file; WARNING -- this will overwrite any
existing results in that location
:param db_path: the path to the directory containing the meta.db file; defaults to the value in config.py
:param remove_missing_features: See help for remove_missing_features in train.py / train_network
"""
os.system('mkdir -p {}'.format(results_dir))
logger.info(f'Loading lgb model from {lightgbm_model_file}')
model = lgb.Booster(model_file=lightgbm_model_file)
generator = get_generator(mode='test', path=db_path, use_malicious_labels=True,
use_count_labels=False,
use_tag_labels=False, return_shas=True,
remove_missing_features=remove_missing_features)
logger.info('running lgb evaluation')
f = open(os.path.join(results_dir, 'results.csv'), 'w')
first_batch = True
for shas, features, labels in tqdm.tqdm(generator):
predictions = {'malware':model.predict(features)}
results = normalize_results(labels, predictions, use_malware=True, use_count=False, use_tags=False)
| pd.DataFrame(results, index=shas) | pandas.DataFrame |
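
# --- Illustrative CLI usage (assumption: the module ends with baker.run(), as is
# typical for baker-based scripts; the script name and all paths are placeholders) ---
#
# python evaluate.py evaluate_network ./results ./checkpoint.pt --db_path /path/to/meta_db_dir
# python evaluate.py evaluate_lgb ./lightgbm_model.txt ./results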
# -*- coding:utf-8 -*-
import numpy as np
import pandas as pd
# import random
# from datetime import datetime, timedelta
# import time
# import re
# from sklearn.externals import joblib
# import requests
# import sys
# from unit import Distance1
# import xlrd
from unit import *
from crawl_data import *
base_path_1 = "./dataset/"
base_path_2 = "./dataset/tmp/"
base_path_3 = "./output/"
nearst = {
"bj": {'miyunshuiku_aq': 'beijing_grid_414', 'tiantan_aq': 'beijing_grid_303', 'yizhuang_aq': 'beijing_grid_323',
'pingchang_aq': 'beijing_grid_264', 'zhiwuyuan_aq': 'beijing_grid_262', 'qianmen_aq': 'beijing_grid_303',
'pinggu_aq': 'beijing_grid_452', 'beibuxinqu_aq': 'beijing_grid_263', 'shunyi_aq': 'beijing_grid_368',
'tongzhou_aq': 'beijing_grid_366', 'yungang_aq': 'beijing_grid_239', 'yufa_aq': 'beijing_grid_278',
'wanshouxigong_aq': 'beijing_grid_303', 'mentougou_aq': 'beijing_grid_240',
'dingling_aq': 'beijing_grid_265',
'donggaocun_aq': 'beijing_grid_452', 'nongzhanguan_aq': 'beijing_grid_324', 'liulihe_aq': 'beijing_grid_216',
'xizhimenbei_aq': 'beijing_grid_283', 'fangshan_aq': 'beijing_grid_238', 'nansanhuan_aq': 'beijing_grid_303',
'huairou_aq': 'beijing_grid_349', 'dongsi_aq': 'beijing_grid_303', 'badaling_aq': 'beijing_grid_224',
'yanqin_aq': 'beijing_grid_225', 'gucheng_aq': 'beijing_grid_261', 'fengtaihuayuan_aq': 'beijing_grid_282',
'wanliu_aq': 'beijing_grid_283', 'yongledian_aq': 'beijing_grid_385', 'aotizhongxin_aq': 'beijing_grid_304',
'dongsihuan_aq': 'beijing_grid_324', 'daxing_aq': 'beijing_grid_301', 'miyun_aq': 'beijing_grid_392',
'guanyuan_aq': 'beijing_grid_282', 'yongdingmennei_aq': 'beijing_grid_303'
},
"ld": {'KC1': 'london_grid_388', 'CD1': 'london_grid_388', 'HV1': 'london_grid_472', 'CD9': 'london_grid_409',
'TD5': 'london_grid_366', 'GR4': 'london_grid_451', 'RB7': 'london_grid_452', 'TH4': 'london_grid_430',
'HR1': 'london_grid_368', 'BL0': 'london_grid_409', 'GR9': 'london_grid_430', 'ST5': 'london_grid_408',
'LH0': 'london_grid_346', 'KF1': 'london_grid_388', 'MY7': 'london_grid_388', 'BX9': 'london_grid_472',
'GN3': 'london_grid_451', 'GN0': 'london_grid_451', 'CT2': 'london_grid_409', 'CT3': 'london_grid_409',
'BX1': 'london_grid_472', 'CR8': 'london_grid_408', 'LW2': 'london_grid_430', 'GB0': 'london_grid_451'
}
}
nearst_wounder_station = {
'ld': {'london_grid_451': 'ILONDON1169', 'london_grid_452': 'IROMFORD7', 'london_grid_409': 'IGLAYIEW96',
'london_grid_430': 'IGLAYIEW96', 'london_grid_408': 'ILONDONM4', 'london_grid_388': 'IGLAYIEW96',
'london_grid_366': 'ILONDON633', 'london_grid_368': 'IGLAYIEW96', 'london_grid_472': 'IBEXLEY16',
'london_grid_346': 'IGLAYIEW96'},
'bj': {'beijing_grid_282': 'IBEIJING355', 'beijing_grid_283': 'I11HAIDI3', 'beijing_grid_385': 'ITONGZHO3',
'beijing_grid_414': 'I11HOUSH2', 'beijing_grid_239': 'IBEIJING250', 'beijing_grid_238': 'IBEIJING250',
'beijing_grid_216': 'IBEIJING250', 'beijing_grid_278': 'I11BAIZH2', 'beijing_grid_262': 'I11HAIDI3',
'beijing_grid_263': 'ICHANGPI3', 'beijing_grid_324': 'ICHAOYAN11', 'beijing_grid_392': 'I11HOUSH2',
'beijing_grid_349': 'I11HOUSH2', 'beijing_grid_452': 'ITONGZHO3', 'beijing_grid_224': 'ICHANGPI3',
'beijing_grid_225': 'ICHANGPI3', 'beijing_grid_264': 'ICHANGPI3', 'beijing_grid_265': 'ICHANGPI3',
'beijing_grid_304': 'IHAIDIAN9', 'beijing_grid_303': 'IXICHENG8', 'beijing_grid_261': 'IBEIJING250',
'beijing_grid_301': 'I11BAIZH2', 'beijing_grid_366': 'ITONGZHO3', 'beijing_grid_368': 'I11HOUSH2',
'beijing_grid_240': 'IBEIJING250', 'beijing_grid_323': 'I11BAIZH2'}}
# Download data from the website
def get_data(city, start_time, end_time, current_day=False):
# if city == "bj":
# link2 = 'https://biendata.com/competition/meteorology/' + city + '/' + start_time + '/' + end_time + '/2k0d1d8'
# respones = requests.get(link2)
# if current_day == False:
# with open(base_path_2 + city + "_meteorology_" + start_time + "_" + end_time + ".csv", 'w') as f:
# f.write(respones.text)
# else:
# with open(base_path_2 + city + "_meteorology_current_day.csv", 'w') as f:
# f.write(respones.text)
if current_day == True:
end_time = "2018-07-01-23"
link3 = 'https://biendata.com/competition/meteorology/' + city + '_grid/' + start_time + '/' + end_time + '/2k0d1d8'
respones = requests.get(link3)
if current_day == False:
with open(base_path_2 + city + "_meteorology_grid_" + start_time + "_" + end_time + ".csv", 'w') as f:
f.write(respones.text)
else:
with open(base_path_2 + city + "_meteorology_grid_current_day.csv", 'w') as f:
f.write(respones.text)
# Load the nearest grid stations (reverse mapping: grid station -> AQ station)
def load_nearst_contrary():
nearst_contrary = {}
for city in nearst.keys():
nearst_contrary[city] = {}
for station1, station2 in nearst[city].items():
nearst_contrary[city][station2] = station1
return nearst_contrary
# Load historical data (before March 27, 2018)
def load_data(city, flag=False):
if city == "bj":
filename = base_path_2 + "bj_historical_meo_grid.csv"
else:
filename = base_path_2 + "ld_historical_meo_grid.csv"
df = pd.read_csv(filename, sep=',')
nearst_contrary = load_nearst_contrary()
df = df[df["station_id"].isin(nearst_contrary[city].keys())]
# df = df["stationName"].replace(nearst_contrary[city].keys(), nearst[city].keys())
df.rename(columns={'station_id': 'station_id', 'time': 'time', 'wind_speed': 'wind_speed'}, inplace=True)
df['time'] = | pd.to_datetime(df['time']) | pandas.to_datetime |
import argparse
import json
import os
import random
from pprint import pprint
import pandas as pd
import soundfile as sf
import torch
import yaml
from tqdm import tqdm
from asteroid_gan_exps.data.metricGAN_dataset import MetricGAN
from asteroid.losses import PITLossWrapper, pairwise_neg_sisdr
from asteroid.metrics import get_metrics
from asteroid.utils import tensors_to_device
from asteroid.utils.torch_utils import load_state_dict_in
from generator import make_generator_and_optimizer
parser = argparse.ArgumentParser()
parser.add_argument('--test_dir', type=str, default="data/wav16k/min/test",
help='Test directory including the csv files')
parser.add_argument('--use_gpu', type=int, default=0,
help='Whether to use the GPU for model execution')
parser.add_argument('--exp_dir', default='exp/tmp',
help='Experiment root')
parser.add_argument('--n_save_ex', type=int, default=10,
help='Number of audio examples to save, -1 means all')
compute_metrics = ['si_sdr', 'sdr', 'sir', 'sar', 'stoi', 'pesq']
def main(conf):
# Make the model
model, _, _ = make_generator_and_optimizer(conf['train_conf'])
# Load best model
with open(os.path.join(conf['exp_dir'], 'best_k_models.json'), "r") as f:
best_k = json.load(f)
best_model_path = min(best_k, key=best_k.get)
# Load checkpoint
checkpoint = torch.load(best_model_path, map_location='cpu')
state = checkpoint['state_dict']
state_copy = state.copy()
# # Remove unwanted keys
for keys, values in state.items():
if keys.startswith('discriminator'):
del state_copy[keys]
if keys.startswith('validation'):
del state_copy[keys]
if keys.startswith('generator'):
state_copy[keys.replace('generator.', '')] = state_copy.pop(keys)
model = load_state_dict_in(state_copy, model)
# Handle device placement
if conf['use_gpu']:
model.cuda()
model_device = next(model.parameters()).device
test_set = MetricGAN(csv_dir=conf['test_dir'],
task=conf['train_conf']['data']['task'],
sample_rate=conf['sample_rate'],
n_src=conf['train_conf']['data']['n_src'],
segment=None) # Uses all segment length
# Randomly choose the indexes of sentences to save.
eval_save_dir = os.path.join(conf['exp_dir'])
ex_save_dir = os.path.join(eval_save_dir, 'examples/')
if conf['n_save_ex'] == -1:
conf['n_save_ex'] = len(test_set)
save_idx = random.sample(range(len(test_set)), conf['n_save_ex'])
series_list = []
torch.no_grad().__enter__()
for idx in tqdm(range(len(test_set))):
# Forward the network on the mixture.
mix, sources = tensors_to_device(test_set[idx], device=model_device)
sources = sources.unsqueeze(0)
est_sources = model(mix)
mix_np = mix.cpu().data.numpy()
sources_np = sources.cpu().squeeze(0).data.numpy()
est_sources_np = est_sources.cpu().squeeze(0).data.numpy()
# For each utterance, we get a dictionary with the mixture path,
# the input and output metrics
utt_metrics = get_metrics(mix_np, sources_np, est_sources_np,
sample_rate=conf['sample_rate'],
metrics_list=compute_metrics)
utt_metrics['mix_path'] = test_set.mixture_path
series_list.append(pd.Series(utt_metrics))
# Save some examples in a folder. Wav files and metrics as text.
if idx in save_idx:
local_save_dir = os.path.join(ex_save_dir, 'ex_{}/'.format(idx))
os.makedirs(local_save_dir, exist_ok=True)
sf.write(local_save_dir + "mixture.wav", mix_np[0],
conf['sample_rate'])
# Loop over the sources and estimates
for src_idx, src in enumerate(sources_np):
sf.write(local_save_dir + "s{}.wav".format(src_idx), src,
conf['sample_rate'])
for src_idx, est_src in enumerate(est_sources_np):
sf.write(local_save_dir + "s{}_estimate.wav".format(src_idx),
est_src, conf['sample_rate'])
# Write local metrics to the example folder.
with open(local_save_dir + 'metrics.json', 'w') as f:
json.dump(utt_metrics, f, indent=0)
# Save all metrics to the experiment folder.
all_metrics_df = | pd.DataFrame(series_list) | pandas.DataFrame |
import pandas as pd
from google.cloud.bigquery import Client
def fetch_eth_blocks(client: Client, start_date: str, end_date: str):
sql = f"""
SELECT blocks.timestamp, blocks.number, blocks.transaction_count, blocks.gas_limit, blocks.gas_used, AVG(txs.gas_price) AS mean_gas_price, MIN(txs.gas_price) AS min_gas_price, MAX(txs.gas_price) AS max_gas_price
FROM `bigquery-public-data.ethereum_blockchain.blocks` AS blocks INNER JOIN `bigquery-public-data.ethereum_blockchain.transactions` AS txs ON blocks.timestamp=txs.block_timestamp
WHERE DATE(blocks.timestamp) >= DATE('{start_date}') and DATE(blocks.timestamp) <= DATE('{end_date}')
GROUP BY blocks.timestamp, blocks.number, blocks.transaction_count, blocks.gas_limit, blocks.gas_used
ORDER BY blocks.timestamp
"""
df = client.query(sql).to_dataframe()
df[["mean_gas_price", "min_gas_price", "max_gas_price"]] = df[["mean_gas_price", "min_gas_price",
"max_gas_price"]] / 1000000000
df["gas_used"] = df["gas_used"] / 100000
df["gas_limit"] = df["gas_limit"] / 100000
df = df.rename(columns={'gas_used': 'gas_used_percentage'})
df["timestamp"] = | pd.to_datetime(df["timestamp"]) | pandas.to_datetime |
from datetime import timedelta
from functools import partial
from operator import attrgetter
import dateutil
import numpy as np
import pytest
import pytz
from pandas._libs.tslibs import OutOfBoundsDatetime, conversion
import pandas as pd
from pandas import (
DatetimeIndex, Index, Timestamp, date_range, datetime, offsets,
to_datetime)
from pandas.core.arrays import DatetimeArray, period_array
import pandas.util.testing as tm
class TestDatetimeIndex(object):
@pytest.mark.parametrize('dt_cls', [DatetimeIndex,
DatetimeArray._from_sequence])
def test_freq_validation_with_nat(self, dt_cls):
# GH#11587 make sure we get a useful error message when generate_range
# raises
msg = ("Inferred frequency None from passed values does not conform "
"to passed frequency D")
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01')], freq='D')
with pytest.raises(ValueError, match=msg):
dt_cls([pd.NaT, pd.Timestamp('2011-01-01').value],
freq='D')
def test_categorical_preserves_tz(self):
# GH#18664 retain tz when going DTI-->Categorical-->DTI
# TODO: parametrize over DatetimeIndex/DatetimeArray
# once CategoricalIndex(DTA) works
dti = pd.DatetimeIndex(
[pd.NaT, '2015-01-01', '1999-04-06 15:14:13', '2015-01-01'],
tz='US/Eastern')
ci = pd.CategoricalIndex(dti)
carr = pd.Categorical(dti)
cser = pd.Series(ci)
for obj in [ci, carr, cser]:
result = pd.DatetimeIndex(obj)
tm.assert_index_equal(result, dti)
def test_dti_with_period_data_raises(self):
# GH#23675
data = pd.PeriodIndex(['2016Q1', '2016Q2'], freq='Q')
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(data)
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
DatetimeIndex(period_array(data))
with pytest.raises(TypeError, match="PeriodDtype data is invalid"):
to_datetime(period_array(data))
def test_dti_with_timedelta64_data_deprecation(self):
# GH#23675
data = np.array([0], dtype='m8[ns]')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(data)
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
result = to_datetime(pd.TimedeltaIndex(data))
assert result[0] == Timestamp('1970-01-01')
def test_construction_caching(self):
df = pd.DataFrame({'dt': pd.date_range('20130101', periods=3),
'dttz': pd.date_range('20130101', periods=3,
tz='US/Eastern'),
'dt_with_null': [pd.Timestamp('20130101'), pd.NaT,
pd.Timestamp('20130103')],
'dtns': pd.date_range('20130101', periods=3,
freq='ns')})
assert df.dttz.dtype.tz.zone == 'US/Eastern'
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
result = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(i, result)
@pytest.mark.parametrize('kwargs', [
{'tz': 'dtype.tz'},
{'dtype': 'dtype'},
{'dtype': 'dtype', 'tz': 'dtype.tz'}])
def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):
tz = tz_aware_fixture
i = pd.date_range('20130101', periods=5, freq='H', tz=tz)
kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}
if str(tz) in ('UTC', 'tzutc()'):
warn = None
else:
warn = FutureWarning
with tm.assert_produces_warning(warn, check_stacklevel=False):
result = DatetimeIndex(i.tz_localize(None).asi8, **kwargs)
expected = DatetimeIndex(i, **kwargs)
tm.assert_index_equal(result, expected)
# localize into the provided tz
i2 = DatetimeIndex(i.tz_localize(None).asi8, tz='UTC')
expected = i.tz_localize(None).tz_localize('UTC')
tm.assert_index_equal(i2, expected)
# incompat tz/dtype
pytest.raises(ValueError, lambda: DatetimeIndex(
i.tz_localize(None).asi8, dtype=i.dtype, tz='US/Pacific'))
def test_construction_index_with_mixed_timezones(self):
# gh-11488: no tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01'),
Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# same tz results in DatetimeIndex
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01 10:00'), Timestamp('2011-01-02 10:00')
], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# Different tz results in Index(dtype=object)
result = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
exp = Index([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# length = 1
result = Index([Timestamp('2011-01-01')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# length = 1 with tz
result = Index(
[Timestamp('2011-01-01 10:00', tz='Asia/Tokyo')], name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00')], tz='Asia/Tokyo',
name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_index_with_mixed_timezones_with_NaT(self):
# see gh-11488
result = Index([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01'),
pd.NaT, Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# Same tz results in DatetimeIndex
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# same tz results in DatetimeIndex (DST)
result = Index([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
pd.NaT,
Timestamp('2011-08-01 10:00', tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'), pd.NaT,
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
# different tz results in Index(dtype=object)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')],
name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
result = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00',
tz='US/Eastern')], name='idx')
exp = Index([pd.NaT, Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
dtype='object', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert not isinstance(result, DatetimeIndex)
# all NaT
result = Index([pd.NaT, pd.NaT], name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is None
# all NaT with tz
result = Index([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
exp = DatetimeIndex([pd.NaT, pd.NaT], tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
assert result.tz is not None
assert result.tz == exp.tz
def test_construction_dti_with_mixed_timezones(self):
# GH 11488 (not changed, added explicit tests)
# no tz results in DatetimeIndex
result = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
exp = DatetimeIndex(
[Timestamp('2011-01-01'), Timestamp('2011-01-02')], name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00',
tz='Asia/Tokyo')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00')],
tz='Asia/Tokyo', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# same tz results in DatetimeIndex (DST)
result = DatetimeIndex([Timestamp('2011-01-01 10:00', tz='US/Eastern'),
Timestamp('2011-08-01 10:00',
tz='US/Eastern')],
name='idx')
exp = DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-08-01 10:00')],
tz='US/Eastern', name='idx')
tm.assert_index_equal(result, exp, exact=True)
assert isinstance(result, DatetimeIndex)
# a tz mismatch with tz-aware inputs raises TypeError/ValueError
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
name='idx')
msg = 'cannot be converted to datetime64'
with pytest.raises(ValueError, match=msg):
DatetimeIndex([Timestamp('2011-01-01 10:00'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
with pytest.raises(ValueError):
DatetimeIndex([Timestamp('2011-01-01 10:00', tz='Asia/Tokyo'),
Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='US/Eastern', name='idx')
with pytest.raises(ValueError, match=msg):
# passing tz should result in a DatetimeIndex; the tz mismatch then raises ValueError
Index([pd.NaT, Timestamp('2011-01-01 10:00'),
pd.NaT, Timestamp('2011-01-02 10:00', tz='US/Eastern')],
tz='Asia/Tokyo', name='idx')
def test_construction_base_constructor(self):
arr = [pd.Timestamp('2011-01-01'), pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
arr = [np.nan, pd.NaT, pd.Timestamp('2011-01-03')]
tm.assert_index_equal(pd.Index(arr), pd.DatetimeIndex(arr))
tm.assert_index_equal(pd.Index(np.array(arr)),
pd.DatetimeIndex(np.array(arr)))
def test_construction_outofbounds(self):
# GH 13663
dates = [datetime(3000, 1, 1), datetime(4000, 1, 1),
datetime(5000, 1, 1), datetime(6000, 1, 1)]
exp = Index(dates, dtype=object)
# coerces to object
tm.assert_index_equal(Index(dates), exp)
with pytest.raises(OutOfBoundsDatetime):
# can't create DatetimeIndex
DatetimeIndex(dates)
def test_construction_with_ndarray(self):
# GH 5152
dates = [datetime(2013, 10, 7),
datetime(2013, 10, 8),
datetime(2013, 10, 9)]
data = DatetimeIndex(dates, freq=pd.offsets.BDay()).values
result = DatetimeIndex(data, freq=pd.offsets.BDay())
expected = DatetimeIndex(['2013-10-07',
'2013-10-08',
'2013-10-09'],
freq='B')
tm.assert_index_equal(result, expected)
def test_verify_integrity_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(['1/1/2000'], verify_integrity=False)
def test_range_kwargs_deprecated(self):
# GH#23919
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000', freq='D')
def test_integer_values_and_tz_deprecated(self):
# GH-24559
values = np.array([946684800000000000])
with tm.assert_produces_warning(FutureWarning):
result = DatetimeIndex(values, tz='US/Central')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="US/Central")
tm.assert_index_equal(result, expected)
# but UTC is *not* deprecated.
with tm.assert_produces_warning(None):
result = DatetimeIndex(values, tz='UTC')
expected = pd.DatetimeIndex(['2000-01-01T00:00:00'], tz="UTC")
tm.assert_index_equal(result, expected)
def test_constructor_coverage(self):
rng = date_range('1/1/2000', periods=10.5)
exp = date_range('1/1/2000', periods=10)
tm.assert_index_equal(rng, exp)
msg = 'periods must be a number, got foo'
with pytest.raises(TypeError, match=msg):
date_range(start='1/1/2000', periods='foo', freq='D')
with pytest.raises(ValueError):
with tm.assert_produces_warning(FutureWarning):
DatetimeIndex(start='1/1/2000', end='1/10/2000')
with pytest.raises(TypeError):
DatetimeIndex('1/1/2000')
# generator expression
gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))
result = DatetimeIndex(gen)
expected = DatetimeIndex([datetime(2000, 1, 1) + timedelta(i)
for i in range(10)])
tm.assert_index_equal(result, expected)
# NumPy string array
strings = np.array(['2000-01-01', '2000-01-02', '2000-01-03'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# string with NaT
strings = np.array(['2000-01-01', '2000-01-02', 'NaT'])
result = DatetimeIndex(strings)
expected = DatetimeIndex(strings.astype('O'))
tm.assert_index_equal(result, expected)
from_ints = DatetimeIndex(expected.asi8)
tm.assert_index_equal(from_ints, expected)
# non-conforming
pytest.raises(ValueError, DatetimeIndex,
['2000-01-01', '2000-01-02', '2000-01-04'], freq='D')
pytest.raises(ValueError, date_range, start='2011-01-01',
freq='b')
pytest.raises(ValueError, date_range, end='2011-01-01',
freq='B')
pytest.raises(ValueError, date_range, periods=10, freq='D')
@pytest.mark.parametrize('freq', ['AS', 'W-SUN'])
def test_constructor_datetime64_tzformat(self, freq):
# see GH#6572: ISO 8601 format results in pytz.FixedOffset
idx = date_range('2013-01-01T00:00:00-05:00',
'2016-01-01T23:59:59-05:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013-01-01T00:00:00+09:00',
'2016-01-01T23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
# Non ISO 8601 format results in dateutil.tz.tzoffset
idx = date_range('2013/1/1 0:00:00-5:00', '2016/1/1 23:59:59-5:00',
freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(-300))
tm.assert_index_equal(idx, expected)
# Unable to use `US/Eastern` because of DST
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='America/Lima')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
idx = date_range('2013/1/1 0:00:00+9:00',
'2016/1/1 23:59:59+09:00', freq=freq)
expected = date_range('2013-01-01T00:00:00', '2016-01-01T23:59:59',
freq=freq, tz=pytz.FixedOffset(540))
tm.assert_index_equal(idx, expected)
expected_i8 = date_range('2013-01-01T00:00:00',
'2016-01-01T23:59:59', freq=freq,
tz='Asia/Tokyo')
tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
expected = DatetimeIndex(['2013-01-01', '2013-01-02']
).tz_localize('US/Eastern')
tm.assert_index_equal(idx, expected)
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
tz='US/Eastern')
tm.assert_index_equal(idx, expected)
# if we already have a tz and its not the same, then raise
idx = DatetimeIndex(['2013-01-01', '2013-01-02'],
dtype='datetime64[ns, US/Eastern]')
pytest.raises(ValueError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns]'))
# this is effectively trying to convert tz's
pytest.raises(TypeError,
lambda: DatetimeIndex(idx,
dtype='datetime64[ns, CET]'))
pytest.raises(ValueError,
lambda: DatetimeIndex(
idx, tz='CET',
dtype='datetime64[ns, US/Eastern]'))
result = DatetimeIndex(idx, dtype='datetime64[ns, US/Eastern]')
tm.assert_index_equal(idx, result)
def test_constructor_name(self):
idx = date_range(start='2000-01-01', periods=1, freq='A',
name='TEST')
assert idx.name == 'TEST'
def test_000constructor_resolution(self):
# 2252
t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)
idx = DatetimeIndex([t1])
assert idx.nanosecond[0] == t1.nanosecond
def test_disallow_setting_tz(self):
# GH 3746
dti = DatetimeIndex(['2010'], tz='UTC')
with pytest.raises(AttributeError):
dti.tz = pytz.timezone('US/Pacific')
@pytest.mark.parametrize('tz', [
None, 'America/Los_Angeles', pytz.timezone('America/Los_Angeles'),
Timestamp('2000', tz='America/Los_Angeles').tz])
def test_constructor_start_end_with_tz(self, tz):
# GH 18595
start = Timestamp('2013-01-01 06:00:00', tz='America/Los_Angeles')
end = Timestamp('2013-01-02 06:00:00', tz='America/Los_Angeles')
result = date_range(freq='D', start=start, end=end, tz=tz)
expected = DatetimeIndex(['2013-01-01 06:00:00',
'2013-01-02 06:00:00'],
tz='America/Los_Angeles')
tm.assert_index_equal(result, expected)
# Especially assert that the timezone is consistent for pytz
assert pytz.timezone('America/Los_Angeles') is result.tz
@pytest.mark.parametrize('tz', ['US/Pacific', 'US/Eastern', 'Asia/Tokyo'])
def test_constructor_with_non_normalized_pytz(self, tz):
# GH 18595
non_norm_tz = Timestamp('2010', tz=tz).tz
result = DatetimeIndex(['2010'], tz=non_norm_tz)
assert pytz.timezone(tz) is result.tz
def test_constructor_timestamp_near_dst(self):
# GH 20854
ts = [Timestamp('2016-10-30 03:00:00+0300', tz='Europe/Helsinki'),
Timestamp('2016-10-30 03:00:00+0200', tz='Europe/Helsinki')]
result = DatetimeIndex(ts)
expected = DatetimeIndex([ts[0].to_pydatetime(),
ts[1].to_pydatetime()])
tm.assert_index_equal(result, expected)
# TODO(GH-24559): Remove the xfail for the tz-aware case.
@pytest.mark.parametrize('klass', [Index, DatetimeIndex])
@pytest.mark.parametrize('box', [
np.array, partial(np.array, dtype=object), list])
@pytest.mark.parametrize('tz, dtype', [
pytest.param('US/Pacific', 'datetime64[ns, US/Pacific]',
marks=[pytest.mark.xfail(),
pytest.mark.filterwarnings(
"ignore:\\n Passing:FutureWarning")]),
[None, 'datetime64[ns]'],
])
def test_constructor_with_int_tz(self, klass, box, tz, dtype):
# GH 20997, 20964
ts = Timestamp('2018-01-01', tz=tz)
result = klass(box([ts.value]), dtype=dtype)
expected = klass([ts])
assert result == expected
# This is the desired future behavior
@pytest.mark.xfail(reason="Future behavior", strict=False)
@pytest.mark.filterwarnings("ignore:\\n Passing:FutureWarning")
def test_construction_int_rountrip(self, tz_naive_fixture):
# GH 12619
# TODO(GH-24559): Remove xfail
tz = tz_naive_fixture
result = 1293858000000000000
expected = DatetimeIndex([1293858000000000000], tz=tz).asi8[0]
assert result == expected
def test_construction_from_replaced_timestamps_with_dst(self):
# GH 18785
index = pd.date_range(pd.Timestamp(2000, 1, 1),
pd.Timestamp(2005, 1, 1),
freq='MS', tz='Australia/Melbourne')
test = pd.DataFrame({'data': range(len(index))}, index=index)
test = test.resample('Y').mean()
result = pd.DatetimeIndex([x.replace(month=6, day=1)
for x in test.index])
expected = pd.DatetimeIndex(['2000-06-01 00:00:00',
'2001-06-01 00:00:00',
'2002-06-01 00:00:00',
'2003-06-01 00:00:00',
'2004-06-01 00:00:00',
'2005-06-01 00:00:00'],
tz='Australia/Melbourne')
tm.assert_index_equal(result, expected)
def test_construction_with_tz_and_tz_aware_dti(self):
# GH 23579
dti = date_range('2016-01-01', periods=3, tz='US/Central')
with pytest.raises(TypeError):
DatetimeIndex(dti, tz='Asia/Tokyo')
def test_construction_with_nat_and_tzlocal(self):
tz = dateutil.tz.tzlocal()
result = DatetimeIndex(['2018', 'NaT'], tz=tz)
expected = DatetimeIndex([Timestamp('2018', tz=tz), pd.NaT])
tm.assert_index_equal(result, expected)
class TestTimeSeries(object):
def test_dti_constructor_preserve_dti_freq(self):
rng = date_range('1/1/2000', '1/2/2000', freq='5min')
rng2 = DatetimeIndex(rng)
assert rng.freq == rng2.freq
def test_dti_constructor_years_only(self, tz_naive_fixture):
tz = tz_naive_fixture
# GH 6961
rng1 = date_range('2014', '2015', freq='M', tz=tz)
expected1 = date_range('2014-01-31', '2014-12-31', freq='M', tz=tz)
rng2 = date_range('2014', '2015', freq='MS', tz=tz)
expected2 = date_range('2014-01-01', '2015-01-01', freq='MS', tz=tz)
rng3 = date_range('2014', '2020', freq='A', tz=tz)
expected3 = date_range('2014-12-31', '2019-12-31', freq='A', tz=tz)
rng4 = date_range('2014', '2020', freq='AS', tz=tz)
expected4 = date_range('2014-01-01', '2020-01-01', freq='AS', tz=tz)
for rng, expected in [(rng1, expected1), (rng2, expected2),
(rng3, expected3), (rng4, expected4)]:
tm.assert_index_equal(rng, expected)
def test_dti_constructor_small_int(self, any_int_dtype):
# see gh-13721
exp = DatetimeIndex(['1970-01-01 00:00:00.00000000',
'1970-01-01 00:00:00.00000001',
'1970-01-01 00:00:00.00000002'])
arr = np.array([0, 10, 20], dtype=any_int_dtype)
tm.assert_index_equal(DatetimeIndex(arr), exp)
def test_ctor_str_intraday(self):
rng = DatetimeIndex(['1-1-2000 00:00:01'])
assert rng[0].second == 1
def test_is_(self):
dti = date_range(start='1/1/2005', end='12/1/2005', freq='M')
assert dti.is_(dti)
assert dti.is_(dti.view())
assert not dti.is_(dti.copy())
def test_index_cast_datetime64_other_units(self):
arr = np.arange(0, 100, 10, dtype=np.int64).view('M8[D]')
idx = Index(arr)
assert (idx.values == conversion.ensure_datetime64ns(arr)).all()
def test_constructor_int64_nocopy(self):
# GH#1624
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr)
arr[50:100] = -1
assert (index.asi8[50:100] == -1).all()
arr = np.arange(1000, dtype=np.int64)
index = DatetimeIndex(arr, copy=True)
arr[50:100] = -1
assert (index.asi8[50:100] != -1).all()
@pytest.mark.parametrize('freq', ['M', 'Q', 'A', 'D', 'B', 'BH',
'T', 'S', 'L', 'U', 'H', 'N', 'C'])
def test_from_freq_recreate_from_data(self, freq):
org = date_range(start='2001/02/01 09:00', freq=freq, periods=1)
idx = DatetimeIndex(org, freq=freq)
tm.assert_index_equal(idx, org)
org = date_range(start='2001/02/01 09:00', freq=freq,
tz='US/Pacific', periods=1)
idx = DatetimeIndex(org, freq=freq, tz='US/Pacific')
tm.assert_index_equal(idx, org)
def test_datetimeindex_constructor_misc(self):
arr = ['1/1/2005', '1/2/2005', 'Jn 3, 2005', '2005-01-04']
pytest.raises(Exception, DatetimeIndex, arr)
arr = ['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04']
idx1 = DatetimeIndex(arr)
arr = [datetime(2005, 1, 1), '1/2/2005', '1/3/2005', '2005-01-04']
idx2 = DatetimeIndex(arr)
arr = [Timestamp(datetime(2005, 1, 1)), '1/2/2005', '1/3/2005',
'2005-01-04']
idx3 = DatetimeIndex(arr)
arr = np.array(['1/1/2005', '1/2/2005', '1/3/2005',
'2005-01-04'], dtype='O')
idx4 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', '1/3/2005', '2005-01-04'])
idx5 = DatetimeIndex(arr)
arr = to_datetime(['1/1/2005', '1/2/2005', 'Jan 3, 2005', '2005-01-04'
])
idx6 = DatetimeIndex(arr)
idx7 = DatetimeIndex(['12/05/2007', '25/01/2008'], dayfirst=True)
idx8 = DatetimeIndex(['2007/05/12', '2008/01/25'], dayfirst=False,
yearfirst=True)
tm.assert_index_equal(idx7, idx8)
for other in [idx2, idx3, idx4, idx5, idx6]:
assert (idx1.values == other.values).all()
sdate = datetime(1999, 12, 25)
edate = datetime(2000, 1, 1)
idx = date_range(start=sdate, freq='1B', periods=20)
assert len(idx) == 20
assert idx[0] == sdate + 0 * offsets.BDay()
assert idx.freq == 'B'
idx = date_range(end=edate, freq=('D', 5), periods=20)
assert len(idx) == 20
assert idx[-1] == edate
assert idx.freq == '5D'
idx1 = date_range(start=sdate, end=edate, freq='W-SUN')
# -*- coding: utf-8 -*-
from __future__ import print_function
import nose
from numpy import nan
from pandas import Timestamp
from pandas.core.index import MultiIndex
from pandas.core.api import DataFrame
from pandas.core.series import Series
from pandas.util.testing import (assert_frame_equal, assert_series_equal
)
from pandas.compat import (lmap)
from pandas import compat
import pandas.core.common as com
import numpy as np
import pandas.util.testing as tm
import pandas as pd
class TestGroupByFilter(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.ts = tm.makeTimeSeries()
self.seriesd = tm.getSeriesData()
self.tsd = tm.getTimeSeriesData()
self.frame = DataFrame(self.seriesd)
self.tsframe = DataFrame(self.tsd)
self.df = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.random.randn(8)})
self.df_mixed_floats = DataFrame(
{'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'],
'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'],
'C': np.random.randn(8),
'D': np.array(
np.random.randn(8), dtype='float32')})
index = MultiIndex(levels=[['foo', 'bar', 'baz', 'qux'], ['one', 'two',
'three']],
labels=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3],
[0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=['first', 'second'])
self.mframe = DataFrame(np.random.randn(10, 3), index=index,
columns=['A', 'B', 'C'])
self.three_group = DataFrame(
{'A': ['foo', 'foo', 'foo', 'foo', 'bar', 'bar', 'bar', 'bar',
'foo', 'foo', 'foo'],
'B': ['one', 'one', 'one', 'two', 'one', 'one', 'one', 'two',
'two', 'two', 'one'],
'C': ['dull', 'dull', 'shiny', 'dull', 'dull', 'shiny', 'shiny',
'dull', 'shiny', 'shiny', 'shiny'],
'D': np.random.randn(11),
'E': np.random.randn(11),
'F': np.random.randn(11)})
def test_filter_series(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.Series([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.Series([20, 22, 24], index=[2, 4, 5])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_series_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(s.index))
assert_series_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(s.index))
def test_filter_single_column_df(self):
df = pd.DataFrame([1, 3, 20, 5, 22, 24, 7])
expected_odd = pd.DataFrame([1, 3, 5, 7], index=[0, 1, 3, 6])
expected_even = pd.DataFrame([20, 22, 24], index=[2, 4, 5])
grouper = df[0].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10), expected_odd)
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10), expected_even)
# Test dropna=False.
assert_frame_equal(
grouped.filter(lambda x: x.mean() < 10, dropna=False),
expected_odd.reindex(df.index))
assert_frame_equal(
grouped.filter(lambda x: x.mean() > 10, dropna=False),
expected_even.reindex(df.index))
def test_filter_multi_column_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': [1, 1, 1, 1]})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': [1, 1]}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() - x['B'].sum() > 10),
expected)
def test_filter_mixed_df(self):
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
expected = pd.DataFrame({'A': [12, 12], 'B': ['b', 'c']}, index=[1, 2])
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 10), expected)
def test_filter_out_all_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
assert_series_equal(grouped.filter(lambda x: x.mean() > 1000), s[[]])
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
assert_frame_equal(
grouped.filter(lambda x: x['A'].sum() > 1000), df.loc[[]])
def test_filter_out_no_groups(self):
s = pd.Series([1, 3, 20, 5, 22, 24, 7])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
filtered = grouped.filter(lambda x: x.mean() > 0)
assert_series_equal(filtered, s)
df = pd.DataFrame({'A': [1, 12, 12, 1], 'B': 'a b c d'.split()})
grouper = df['A'].apply(lambda x: x % 2)
grouped = df.groupby(grouper)
filtered = grouped.filter(lambda x: x['A'].mean() > 0)
assert_frame_equal(filtered, df)
def test_filter_out_all_groups_in_df(self):
# GH12768
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=False)
expected = pd.DataFrame({'a': [nan] * 3, 'b': [nan] * 3})
assert_frame_equal(expected, res)
df = pd.DataFrame({'a': [1, 1, 2], 'b': [1, 2, 0]})
res = df.groupby('a')
res = res.filter(lambda x: x['b'].sum() > 5, dropna=True)
expected = pd.DataFrame({'a': [], 'b': []}, dtype="int64")
assert_frame_equal(expected, res)
def test_filter_condition_raises(self):
def raise_if_sum_is_zero(x):
if x.sum() == 0:
raise ValueError
else:
return x.sum() > 0
s = pd.Series([-1, 0, 1, 2])
grouper = s.apply(lambda x: x % 2)
grouped = s.groupby(grouper)
self.assertRaises(TypeError,
lambda: grouped.filter(raise_if_sum_is_zero))
def test_filter_with_axis_in_groupby(self):
# issue 11041
index = pd.MultiIndex.from_product([range(10), [0, 1]])
data = pd.DataFrame(
np.arange(100).reshape(-1, 20), columns=index, dtype='int64')
result = data.groupby(level=0,
axis=1).filter(lambda x: x.iloc[0, 0] > 10)
expected = data.iloc[:, 12:20]
assert_frame_equal(result, expected)
def test_filter_bad_shapes(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby('B')
g_s = s.groupby(s)
f = lambda x: x
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: x == 1
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
f = lambda x: np.outer(x, x)
self.assertRaises(TypeError, lambda: g_df.filter(f))
self.assertRaises(TypeError, lambda: g_s.filter(f))
def test_filter_nan_is_false(self):
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
s = df['B']
g_df = df.groupby(df['B'])
g_s = s.groupby(s)
f = lambda x: np.nan
assert_frame_equal(g_df.filter(f), df.loc[[]])
assert_series_equal(g_s.filter(f), s[[]])
def test_filter_against_workaround(self):
np.random.seed(0)
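# The "workaround" being compared against: before GroupBy.filter existed, the same
# result was obtained by evaluating the group-level predicate with transform() and
# using the broadcast boolean result as a row mask (s[grouped.transform(f).astype('bool')]).
# filter(f) should keep exactly the rows whose group satisfies f.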
# Series of ints
s = Series(np.random.randint(0, 100, 1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Series of floats
s = 100 * Series(np.random.random(1000))
grouper = s.apply(lambda x: np.round(x, -1))
grouped = s.groupby(grouper)
f = lambda x: x.mean() > 10
old_way = s[grouped.transform(f).astype('bool')]
new_way = grouped.filter(f)
assert_series_equal(new_way.sort_values(), old_way.sort_values())
# Set up DataFrame of ints, floats, strings.
from string import ascii_lowercase
letters = np.array(list(ascii_lowercase))
N = 1000
random_letters = letters.take(np.random.randint(0, 26, N))
df = DataFrame({'ints': Series(np.random.randint(0, 100, N)),
'floats': N / 10 * Series(np.random.random(N)),
'letters': Series(random_letters)})
# Group by ints; filter on floats.
grouped = df.groupby('ints')
old_way = df[grouped.floats.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['floats'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
# Group by floats (rounded); filter on strings.
grouper = df.floats.apply(lambda x: np.round(x, -1))
grouped = df.groupby(grouper)
old_way = df[grouped.letters.
transform(lambda x: len(x) < N / 10).astype('bool')]
new_way = grouped.filter(lambda x: len(x.letters) < N / 10)
assert_frame_equal(new_way, old_way)
# Group by strings; filter on ints.
grouped = df.groupby('letters')
old_way = df[grouped.ints.
transform(lambda x: x.mean() > N / 20).astype('bool')]
new_way = grouped.filter(lambda x: x['ints'].mean() > N / 20)
assert_frame_equal(new_way, old_way)
def test_filter_using_len(self):
# BUG GH4447
df = DataFrame({'A': np.arange(8),
'B': list('aabbbbcc'),
'C': np.arange(8)})
grouped = df.groupby('B')
actual = grouped.filter(lambda x: len(x) > 2)
expected = DataFrame(
{'A': np.arange(2, 6),
'B': list('bbbb'),
'C': np.arange(2, 6)}, index=np.arange(2, 6))
assert_frame_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = df.loc[[]]
assert_frame_equal(actual, expected)
# Series have always worked properly, but we'll test anyway.
s = df['B']
grouped = s.groupby(s)
actual = grouped.filter(lambda x: len(x) > 2)
expected = Series(4 * ['b'], index=np.arange(2, 6), name='B')
assert_series_equal(actual, expected)
actual = grouped.filter(lambda x: len(x) > 4)
expected = s[[]]
assert_series_equal(actual, expected)
def test_filter_maintains_ordering(self):
# Simple case: index is sequential. #4621
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]})
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Now index is sequentially decreasing.
df.index = np.arange(len(df) - 1, -1, -1)
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
# Index is shuffled.
SHUFFLED = [4, 6, 7, 2, 1, 0, 5, 3]
df.index = df.index[SHUFFLED]
s = df['pid']
grouped = df.groupby('tag')
actual = grouped.filter(lambda x: len(x) > 1)
expected = df.iloc[[1, 2, 4, 7]]
assert_frame_equal(actual, expected)
grouped = s.groupby(df['tag'])
actual = grouped.filter(lambda x: len(x) > 1)
expected = s.iloc[[1, 2, 4, 7]]
assert_series_equal(actual, expected)
def test_filter_multiple_timestamp(self):
# GH 10114
df = DataFrame({'A': np.arange(5, dtype='int64'),
'B': ['foo', 'bar', 'foo', 'bar', 'bar'],
'C': Timestamp('20130101')})
grouped = df.groupby(['B', 'C'])
result = grouped['A'].filter(lambda x: True)
assert_series_equal(df['A'], result)
result = grouped['A'].transform(len)
expected = Series([2, 3, 2, 3, 3], name='A')
assert_series_equal(result, expected)
result = grouped.filter(lambda x: True)
assert_frame_equal(df, result)
result = grouped.transform('sum')
expected = DataFrame({'A': [2, 8, 2, 8, 8]})
assert_frame_equal(result, expected)
result = grouped.transform(len)
expected = DataFrame({'A': [2, 3, 2, 3, 3]})
assert_frame_equal(result, expected)
def test_filter_and_transform_with_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 1, 1, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_multiple_non_unique_int_index(self):
# GH4620
index = [1, 1, 1, 2, 0, 0, 0, 1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_float_index(self):
# GH4620
index = np.array([1, 1, 1, 2, 1, 1, 0, 1], dtype=float)
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_timestamp_index(self):
# GH4620
t0 = Timestamp('2013-09-30 00:05:00')
t1 = Timestamp('2013-10-30 00:05:00')
t2 = Timestamp('2013-11-30 00:05:00')
index = [t1, t1, t1, t2, t1, t1, t0, t1]
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_and_transform_with_non_unique_string_index(self):
# GH4620
index = list('bbbcbbab')
df = DataFrame({'pid': [1, 1, 1, 2, 2, 3, 3, 3],
'tag': [23, 45, 62, 24, 45, 34, 25, 62]}, index=index)
grouped_df = df.groupby('tag')
ser = df['pid']
grouped_ser = ser.groupby(df['tag'])
expected_indexes = [1, 2, 4, 7]
# Filter DataFrame
actual = grouped_df.filter(lambda x: len(x) > 1)
expected = df.iloc[expected_indexes]
assert_frame_equal(actual, expected)
actual = grouped_df.filter(lambda x: len(x) > 1, dropna=False)
expected = df.copy()
expected.iloc[[0, 3, 5, 6]] = np.nan
assert_frame_equal(actual, expected)
# Filter Series
actual = grouped_ser.filter(lambda x: len(x) > 1)
expected = ser.take(expected_indexes)
assert_series_equal(actual, expected)
actual = grouped_ser.filter(lambda x: len(x) > 1, dropna=False)
NA = np.nan
expected = Series([NA, 1, 1, NA, 2, NA, NA, 3], index, name='pid')
# ^ made manually because this can get confusing!
assert_series_equal(actual, expected)
# Transform Series
actual = grouped_ser.transform(len)
expected = Series([1, 2, 2, 1, 2, 1, 1, 2], index, name='pid')
assert_series_equal(actual, expected)
# Transform (a column from) DataFrameGroupBy
actual = grouped_df.pid.transform(len)
assert_series_equal(actual, expected)
def test_filter_has_access_to_grouped_cols(self):
df = DataFrame([[1, 2], [1, 3], [5, 6]], columns=['A', 'B'])
import pickle
import numpy as np
import pandas as pd
## plot conf
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 7})
width = 8.5/2.54
height = width*(3/4)
###
import os
script_dir = os.path.dirname(os.path.abspath(__file__))
plot_path = './'
male_rarities, female_rarities = pickle.load(open(script_dir+'/plot_pickles/raritys.p', 'rb'))
## Load DPVI fits
epsilons = [0.74]
epsilons = np.array(epsilons)
n_runs = 100
# Load DP results
syn_dpvi_coef_female_dict = pickle.load(open(script_dir+'/plot_pickles/female_coef_dict.p', 'rb'))
syn_dpvi_p_value_female_dict = pickle.load(open(script_dir+'/plot_pickles/female_p_value_dict.p', 'rb'))
syn_dpvi_coef_male_dict = pickle.load(open(script_dir+'/plot_pickles/male_coef_dict.p', 'rb'))
syn_dpvi_p_value_male_dict = pickle.load(open(script_dir+'/plot_pickles/male_p_value_dict.p', 'rb'))
## load bootstrap results
female_names = list(pd.read_csv('../R/original_bootstrapped/female_bootstrapped.csv', index_col=0))
import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.io as pio
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
pio.templates.default = "plotly_white"
dataset = {
"Cifar10": "--cifar10",
"Cifar100": "--cifar100",
# "SVHN": "--svhn",
"MNIST": "--mnist"
}
models_data = {
"Cifar10": ["--cifar10-vgg19"],
# "Cifar100": ["--cifar100-resnet20", "--cifar100-resnet50"],
"Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
"SVHN": ["--svhn-vgg19"],
"MNIST":["--mnist-lenet"],
}
color_bars_sparsity = {
2: "g",
3: "c",
4: "b",
5: "y"
}
tasks = {
"nb-param-compressed-total",
"finetuned-score",
"param-compression-rate-total"
}
ylabel_task = {
"nb-param-compressed-total": "log(# non-zero value)",
"finetuned-score": "Accuracy",
"param-compression-rate-total": "Compression Rate"
}
scale_tasks = {
"nb-param-compressed-total": "log",
"finetuned-score": "linear",
"param-compression-rate-total": "linear"
}
def get_tucker_results():
results_path_tucker = "2020/04/0_1_compression_tucker_tensortrain"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
df_tucker_tt = df_tucker_tt.fillna("None")
df_tucker_tt = df_tucker_tt.assign(**{"only-dense": False, "use-pretrained": False})
df_tucker_tt = df_tucker_tt[df_tucker_tt["compression"] == "tucker"]
return df_tucker_tt
def get_deepfried_results():
results_path_tucker = "2020/05/5_6_compression_baselines"
src_results_path_tucker = root_source_dir / results_path_tucker / "results.csv"
df_tucker_tt = pd.read_csv(src_results_path_tucker, header=0)
import os
import argparse
import sys
sys.path.append('../')
from load_paths import load_box_paths
from processing_helpers import *
import pandas as pd
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
mpl.rcParams['pdf.fonttype'] = 42
sns.set_style('whitegrid', {'axes.linewidth': 0.5})
def parse_args():
description = "Simulation run for modeling Covid-19"
parser = argparse.ArgumentParser(description=description)
parser.add_argument(
"-s",
"--stem",
type=str,
help="Name of simulation experiment"
)
parser.add_argument(
"-loc",
"--Location",
type=str,
help="Local or NUCLUSTER",
default="Local"
)
parser.add_argument(
"-perc",
"--overflow_threshold_percents",
type=float,
nargs='+',
help="Calculate probability for specified percent of capacity limit",
default=99
)
return parser.parse_args()
def get_latest_filedate(file_path=os.path.join(datapath, 'covid_IDPH', 'Corona virus reports',
'hospital_capacity_thresholds'), extraThresholds=False):
files = os.listdir(file_path)
files = sorted(files, key=len)
if extraThresholds == False:
files = [name for name in files if not 'extra_thresholds' in name]
if extraThresholds == True:
files = [name for name in files if 'extra_thresholds' in name]
filedates = [item.replace('capacity_weekday_average_', '') for item in files]
filedates = [item.replace('.csv', '') for item in filedates]
latest_filedate = max([int(x) for x in filedates])
fname = f'capacity_weekday_average_{latest_filedate}.csv'
if extraThresholds == True:
fname = f'capacity_weekday_average_{latest_filedate}__extra_thresholds.csv'
return fname
def get_probs(ems_nr, channels=['total_hosp_census', 'crit_det', 'ventilators'], overflow_threshold_percents=[1, 0.8],
param=None, save_csv=False, plot=True):
"""Define columns and labels"""
if ems_nr == 0:
region_suffix = "_All"
region_label = 'Illinois'
else:
region_suffix = "_EMS-" + str(ems_nr)
region_label = region_suffix.replace('_EMS-', 'COVID-19 Region ')
column_list = ['scen_num', 'sample_num', 'time', 'startdate']
grp_channels = ['date']
if param is not None:
column_list = column_list + param
grp_channels = ['date'] + param
column_list_t = column_list
for channel in ['hosp_det', 'crit_det']:
column_list_t.append(channel + region_suffix)
""" Load dataframes"""
df = load_sim_data(exp_name, region_suffix=region_suffix, column_list=column_list, add_incidence=False)
df['total_hosp_census'] = df['hosp_det'] + df['crit_det']
df['ventilators'] = get_vents(df['crit_det'])
capacity_df = load_capacity(ems_nr)
len(df['scen_num'].unique())
df['N_scen_num'] = df.groupby(grp_channels)['scen_num'].transform('count')
df_all = pd.DataFrame()
for channel in channels:
if channel == "crit_det": channel_label = 'icu_availforcovid'
if channel == "hosp_det": channel_label = 'hb_availforcovid'
if channel == "total_hosp_census": channel_label = 'hb_availforcovid'
if channel == "ventilators": channel_label = 'vent_availforcovid'
for overflow_threshold_percent in overflow_threshold_percents:
thresh = capacity_df[f'{channel}'] * overflow_threshold_percent
mdf = df.copy()
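# Flag, per scenario and date, whether the simulated census meets or exceeds the
# capacity threshold; averaging that flag over scenarios (sum/count below) yields the
# overflow probability for each date.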
mdf.loc[df[f'{channel}'] < thresh, 'above_yn'] = 0
mdf.loc[df[f'{channel}'] >= thresh, 'above_yn'] = 1
mdf = mdf.groupby(grp_channels)['above_yn'].agg(['sum', 'count', 'nunique']).rename_axis(None, axis=0)
mdf = mdf.reset_index()
mdf['prob'] = mdf['sum'] / mdf['count']
mdf = mdf.rename(columns={'sum': 'n_above', 'count': 'N_scen_num', 'index': 'date'})
mdf['overflow_threshold_percent'] = overflow_threshold_percent
mdf['capacity_channel'] = channel_label
mdf['availforcovid'] = capacity_df[f'{channel}']
mdf['region'] = ems_nr
del thresh
if df_all.empty:
df_all = mdf
else:
df_all = pd.concat([df_all, mdf])
del mdf
if plot:
plot_probs(df=df_all, region_label=region_label)
if save_csv:
filename = f'overflow_probabilities_over_time_region_{ems_nr}.csv'
df_all.to_csv(os.path.join(sim_output_path, filename), index=False, date_format='%Y-%m-%d')
return df_all
def plot_probs(df, region_label):
fig = plt.figure(figsize=(12, 4))
fig.suptitle(region_label, y=0.97, fontsize=14)
fig.subplots_adjust(right=0.98, wspace=0.2, left=0.05, hspace=0.4, top=0.84, bottom=0.13)
palette = sns.color_palette('Set1', 12)
axes = [fig.add_subplot(1, 3, x + 1) for x in range(3)]
linestyles = ['solid', 'dashed']
for c, channel in enumerate(df.capacity_channel.unique()):
mdf = df[df['capacity_channel'] == channel]
ax = axes[c]
ax.set_ylim(0, 1)
ax.set_title(channel)
ax.set_ylabel(f'Probability of overflow')
for e, t in enumerate(list(df.overflow_threshold_percent.unique())):
line_label = f'{channel} ({t * 100})%'
adf = mdf[mdf['overflow_threshold_percent'] == t]
ax.plot(adf['date'], adf['prob'], linestyle=linestyles[e], color=palette[c], label=line_label)
ax.xaxis.set_major_formatter(mdates.DateFormatter('%d\n%b\n%Y'))
axes[-1].legend()
plotname = f'overflow_probabilities_{region_label}'
plt.savefig(os.path.join(plot_path, f'{plotname}.png'))
plt.savefig(os.path.join(plot_path, 'pdf', f'{plotname}.pdf'))
def write_probs_to_template(df, plot=True):
fname_capacity = get_latest_filedate()
civis_template = pd.read_csv(
os.path.join(datapath, 'covid_IDPH', 'Corona virus reports', 'hospital_capacity_thresholds', fname_capacity))
civis_template = civis_template.drop_duplicates()
civis_template['date_window_upper_bound'] = pd.to_datetime(civis_template['date_window_upper_bound'])
civis_template_all = pd.DataFrame()
for index, row in civis_template.iterrows():
upper_limit = row['date_window_upper_bound']
lower_limit = upper_limit - pd.Timedelta(7, 'days')
df_sub = df[df['date'].between(lower_limit, upper_limit)]
df_sub = df_sub[df_sub['region'] == int(row['geography_modeled'].replace("covidregion_", ""))]
df_sub = df_sub[df_sub['capacity_channel'] == row['resource_type']]
df_sub = df_sub[df_sub['overflow_threshold_percent'] == row['overflow_threshold_percent']]
"""Take maximum of previous 7 days"""
civis_template.loc[index, 'percent_of_simulations_that_exceed'] = df_sub['prob'].max()
if civis_template_all.empty:
civis_template_all = civis_template
else:
civis_template_all = pd.concat([civis_template_all, civis_template])
import pathlib
import pytest
import pandas as pd
import numpy as np
import gradelib
EXAMPLES_DIRECTORY = pathlib.Path(__file__).parent / "examples"
GRADESCOPE_EXAMPLE = gradelib.Gradebook.from_gradescope(
EXAMPLES_DIRECTORY / "gradescope.csv"
)
CANVAS_EXAMPLE = gradelib.Gradebook.from_canvas(EXAMPLES_DIRECTORY / "canvas.csv")
# the canvas example has Lab 01, which is also in Gradescope. Let's remove it
CANVAS_WITHOUT_LAB_EXAMPLE = gradelib.Gradebook(
points=CANVAS_EXAMPLE.points.drop(columns="lab 01"),
maximums=CANVAS_EXAMPLE.maximums.drop(index="lab 01"),
late=CANVAS_EXAMPLE.late.drop(columns="lab 01"),
dropped=CANVAS_EXAMPLE.dropped.drop(columns="lab 01"),
)
# given
ROSTER = gradelib.read_egrades_roster(EXAMPLES_DIRECTORY / "egrades.csv")
def assert_gradebook_is_sound(gradebook):
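# A gradebook is "sound" when points, dropped, and late share the same students
# (index) and assignments (columns), and maximums covers exactly those assignments.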
assert gradebook.points.shape == gradebook.dropped.shape == gradebook.late.shape
assert (gradebook.points.columns == gradebook.dropped.columns).all()
assert (gradebook.points.columns == gradebook.late.columns).all()
assert (gradebook.points.index == gradebook.dropped.index).all()
assert (gradebook.points.index == gradebook.late.index).all()
assert (gradebook.points.columns == gradebook.maximums.index).all()
# assignments property
# -----------------------------------------------------------------------------
def test_assignments_are_produced_in_order():
assert list(GRADESCOPE_EXAMPLE.assignments) == list(
GRADESCOPE_EXAMPLE.points.columns
)
# keep_pids()
# -----------------------------------------------------------------------------
def test_keep_pids():
# when
actual = GRADESCOPE_EXAMPLE.keep_pids(ROSTER.index)
# then
assert len(actual.pids) == 3
assert_gradebook_is_sound(actual)
def test_keep_pids_raises_if_pid_does_not_exist():
# given
pids = ["A12345678", "ADNEDNE00"]
# when
with pytest.raises(KeyError):
actual = GRADESCOPE_EXAMPLE.keep_pids(pids)
# keep_assignments() and remove_assignments()
# -----------------------------------------------------------------------------
def test_keep_assignments():
# when
actual = GRADESCOPE_EXAMPLE.keep_assignments(["homework 01", "homework 02"])
# then
assert set(actual.assignments) == {"homework 01", "homework 02"}
assert_gradebook_is_sound(actual)
def test_keep_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.keep_assignments(assignments)
def test_remove_assignments():
# when
actual = GRADESCOPE_EXAMPLE.remove_assignments(
GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
)
# then
assert set(actual.assignments) == {
"homework 01",
"homework 02",
"homework 03",
"homework 04",
"homework 05",
"homework 06",
"homework 07",
"project 01",
"project 02",
}
assert_gradebook_is_sound(actual)
def test_remove_assignments_raises_if_assignment_does_not_exist():
# given
assignments = ["homework 01", "this aint an assignment"]
# then
with pytest.raises(KeyError):
GRADESCOPE_EXAMPLE.remove_assignments(assignments)
# combine()
# -----------------------------------------------------------------------------
def test_combine_with_keep_pids():
# when
combined = gradelib.Gradebook.combine(
[GRADESCOPE_EXAMPLE, CANVAS_WITHOUT_LAB_EXAMPLE], keep_pids=ROSTER.index
)
# then
assert "homework 01" in combined.assignments
assert "midterm exam" in combined.assignments
assert_gradebook_is_sound(combined)
def test_combine_raises_if_duplicate_assignments():
# the canvas example and the gradescope example both have lab 01.
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine([GRADESCOPE_EXAMPLE, CANVAS_EXAMPLE])
def test_combine_raises_if_indices_do_not_match():
# when
with pytest.raises(ValueError):
combined = gradelib.Gradebook.combine(
[CANVAS_WITHOUT_LAB_EXAMPLE, GRADESCOPE_EXAMPLE]
)
# number_of_lates()
# -----------------------------------------------------------------------------
def test_number_of_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=labs)
# then
assert list(actual) == [1, 4, 2, 2]
def test_number_of_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.number_of_lates(within=[])
def test_number_of_lates_with_no_assignment_list_uses_all_assignments():
# when
actual = GRADESCOPE_EXAMPLE.number_of_lates()
# then
assert list(actual) == [1, 5, 2, 2]
# forgive_lates()
# -----------------------------------------------------------------------------
def test_forgive_lates():
# when
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [0, 1, 0, 0]
assert_gradebook_is_sound(actual)
def test_forgive_lates_with_empty_assignment_list_raises():
# when
with pytest.raises(ValueError):
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=3, within=[])
def test_forgive_lates_forgives_the_first_n_lates():
# by "first", we mean in the order specified by the `within` argument
# student A10000000 had late lab 01, 02, 03, and 07
assignments = ["lab 02", "lab 07", "lab 01", "lab 03"]
# when
actual = GRADESCOPE_EXAMPLE.forgive_lates(n=2, within=assignments)
# then
assert not actual.late.loc["A10000000", "lab 02"]
assert not actual.late.loc["A10000000", "lab 07"]
assert actual.late.loc["A10000000", "lab 01"]
assert actual.late.loc["A10000000", "lab 03"]
def test_forgive_lates_does_not_forgive_dropped():
# given
labs = GRADESCOPE_EXAMPLE.assignments.starting_with("lab")
dropped = GRADESCOPE_EXAMPLE.dropped.copy()
dropped.iloc[:, :] = True
example = gradelib.Gradebook(
points=GRADESCOPE_EXAMPLE.points,
maximums=GRADESCOPE_EXAMPLE.maximums,
late=GRADESCOPE_EXAMPLE.late,
dropped=dropped,
)
# when
actual = example.forgive_lates(n=3, within=labs)
# then
assert list(actual.number_of_lates(within=labs)) == [1, 4, 2, 2]
assert_gradebook_is_sound(actual)
# drop_lowest()
# -----------------------------------------------------------------------------
def test_drop_lowest_on_simple_example_1():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
p2 = pd.Series(data=[2, 7, 15, 20], index=columns, name="A2")
points = pd.DataFrame([p1, p2])
maximums = pd.Series([2, 50, 100, 20], index=columns)
gradebook = gradelib.Gradebook(points, maximums)
homeworks = gradebook.assignments.starting_with("hw")
# if we are dropping 1 HW, the right strategy is to drop the 50 point HW
# for A1 and to drop the 100 point homework for A2
# when
actual = gradebook.drop_lowest(1, within=homeworks)
# then
assert actual.dropped.iloc[0, 1]
assert actual.dropped.iloc[1, 2]
assert list(actual.dropped.sum(axis=1)) == [1, 1]
assert_gradebook_is_sound(actual)
def test_drop_lowest_on_simple_example_2():
# given
columns = ["hw01", "hw02", "hw03", "lab01"]
p1 = pd.Series(data=[1, 30, 90, 20], index=columns, name="A1")
"""
Module for computing a list of vegetation indices from a datacube of spectral bands, so that users do not have to implement the openEO callback functions themselves
"""
from openeo.rest.datacube import DataCube
from openeo.processes import ProcessBuilder, array_modify, power, sqrt, if_, multiply, divide, arccos, add, subtract, linear_scale_range
from shapely.geometry import Point
import numpy as np
import netCDF4 as nc
import glob
import seaborn as sns
from matplotlib.dates import DateFormatter
import geopandas as gpd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap, BoundaryNorm
import earthpy.plot as ep
import pandas as pd
import rasterio
WL_B04 = 0.6646
WL_B08 = 0.8328
WL_B11 = 1.610
one_over_pi = 1. / np.pi
# source: https://git.vito.be/projects/LCLU/repos/satio/browse/satio/rsindices.py
ndvi = lambda B04, B08: (B08 - B04) / (B08 + B04)
ndmi = lambda B08, B11: (B08 - B11) / (B08 + B11)
ndgi = lambda B03, B04: (B03 - B04) / (B03 + B04)
def anir(B04, B08, B11):
a = sqrt(np.square(WL_B08 - WL_B04) + power(B08 - B04, 2))
b = sqrt(np.square(WL_B11 - WL_B08) + power(B11 - B08, 2))
c = sqrt(np.square(WL_B11 - WL_B04) + power(B11 - B04, 2))
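# a, b and c are the side lengths of the triangle formed by the (wavelength, reflectance)
# points of B04, B08 and B11; the law of cosines, cos(angle) = (a^2 + b^2 - c^2) / (2*a*b),
# gives the cosine of the angle at the NIR (B08) vertex, which is clipped to [-1, 1]
# against rounding errors before arccos, and finally normalised by pi to [0, 1].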
# calculate angle with NIR as reference (ANIR)
site_length = (power(a, 2) + power(b, 2) - power(c, 2)) / (2 * a * b)
site_length = if_(site_length.lt(-1), -1, site_length)
site_length = if_(site_length.gt(1), 1, site_length)
return multiply(one_over_pi, arccos(site_length))
ndre1 = lambda B05, B08: (B08 - B05) / (B08 + B05)
ndre2 = lambda B06, B08: (B08 - B06) / (B08 + B06)
ndre5 = lambda B05, B07: (B07 - B05) / (B07 + B05)
indices = {
"NDVI": [ndvi, (0,1)],
"NDMI": [ndmi, (-1,1)],
"NDGI": [ndgi, (-1,1)],
"ANIR": [anir, (0,1)],
"NDRE1": [ndre1, (-1,1)],
"NDRE2": [ndre2, (-1,1)],
"NDRE5": [ndre5, (-1,1)]
}
def _callback(x: ProcessBuilder, index_list: list, datacube: DataCube, scaling_factor: int) -> ProcessBuilder:
index_values = []
x_res = x
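# For each requested index: look up the positions of the bands its formula needs (taken
# from the lambda's argument names), evaluate the formula on those array elements,
# optionally rescale the result to [0, scaling_factor], and finally append all index
# values after the original bands with array_modify.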
for index_name in index_list:
if index_name not in indices:
raise NotImplementedError("Index " + index_name + " has not been implemented.")
index_fun, index_range = indices[index_name]
band_indices = [
datacube.metadata.get_band_index(band)
for band in index_fun.__code__.co_varnames[:index_fun.__code__.co_argcount]
]
index_result = index_fun(*[x.array_element(i) for i in band_indices])
if scaling_factor is not None:
index_result = index_result.linear_scale_range(*index_range, 0, scaling_factor)
index_values.append(index_result)
if scaling_factor is not None:
x_res = x_res.linear_scale_range(0,8000,0,scaling_factor)
return array_modify(data=x_res, values=index_values, index=len(datacube.metadata._band_dimension.bands))
def compute_indices(datacube: DataCube, index_list: list, scaling_factor: int = None) -> DataCube:
"""
Computes a list of indices from a datacube
:param datacube: an instance of openeo.rest.DataCube
:param index_list: a list of index names. The following indices are currently implemented: NDVI, NDMI, NDGI, ANIR, NDRE1, NDRE2 and NDRE5
:param scaling_factor: optional; when given, the original bands and the computed indices are linearly rescaled to the range [0, scaling_factor]
:return: the datacube with the indices attached as additional bands
"""
return datacube.apply_dimension(dimension="bands",
process=lambda x: _callback(x, index_list, datacube, scaling_factor)).rename_labels('bands',
target=datacube.metadata.band_names + index_list)
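# Usage sketch (kept as a comment so the module stays import-safe). The back-end URL,
# collection id and band names below are illustrative assumptions, not prescriptions:
#
#   import openeo
#   connection = openeo.connect("https://openeo.vito.be").authenticate_oidc()
#   cube = connection.load_collection(
#       "TERRASCOPE_S2_TOC_V2",
#       spatial_extent={"west": 5.0, "south": 51.1, "east": 5.1, "north": 51.2},
#       temporal_extent=["2019-01-01", "2019-12-31"],
#       bands=["B03", "B04", "B05", "B06", "B07", "B08", "B11"],
#   )
#   cube = compute_indices(cube, ["NDVI", "NDMI", "ANIR"], scaling_factor=250)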
def lin_scale_range(x,inputMin,inputMax,outputMin,outputMax):
return add(multiply(divide(subtract(x,inputMin), subtract(inputMax, inputMin)), subtract(outputMax, outputMin)), outputMin)
def _random_point_in_shp(shp):
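# Rejection sampling: draw uniformly from the polygon's bounding box and retry until a
# point actually falls inside the shape.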
within = False
while not within:
x = np.random.uniform(shp.bounds[0], shp.bounds[2])
y = np.random.uniform(shp.bounds[1], shp.bounds[3])
within = shp.contains(Point(x, y))
return Point(x,y)
def point_sample_fields(crop_samples, nr_iterations):
points = {"name":[], "geometry":[]}
for name,crop_df in crop_samples.items():
for num in range(nr_iterations):
points["name"] += [name]*len(crop_df)
points["geometry"] += np.asarray(crop_df['geometry'].apply(_random_point_in_shp)).tolist()
gpd_points = gpd.GeoDataFrame(points, crs="EPSG:4326")
gpd_points_utm = gpd_points.to_crs("EPSG:32631")
points_per_type = {}
for i in set(gpd_points_utm["name"]):
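# The layer is in UTM (metres), so buffer(1) turns each sampled point into a ~1 m
# polygon; convert back to WGS84 GeoJSON, one FeatureCollection per crop type.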
crop = gpd_points_utm[gpd_points_utm["name"]==i].buffer(1).to_crs("EPSG:4326").to_json()
points_per_type[i] = crop
return points_per_type
def prep_boxplot(year, bands):
df = pd.DataFrame(columns=["Crop type","Date","Band","Iteration nr","Band value"])
for file in glob.glob('.\\data\\300_*\\*.nc'):
ds_orig = nc.Dataset(file)
dt_rng = pd.date_range("01-01-"+str(year), "31-12-"+str(year),freq="MS")
spl = file.split("\\")
f_name = spl[-1].split(".")[0]
crop_type = spl[-2].split("_")[-1]
for band in bands:
try:
ds = ds_orig[band][:]
except:
print("File "+file+" is corrupt. Please remove it from your folder.")
vals = None
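# Each sampled patch covers only 1x1 to 2x2 pixels; average over the spatial dimensions
# so every band contributes a single value per monthly time step.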
if ds.shape[1:3] == (1,2):
vals = np.mean(ds,axis=2).flatten().tolist()
elif ds.shape[1:3] == (2,1):
vals = np.mean(ds,axis=1).flatten().tolist()
elif ds.shape[1:3] == (1,1):
vals = ds.flatten().tolist()
elif ds.shape[1:3] == (2,2):
vals = np.mean(np.mean(ds,axis=1),axis=1).tolist()
else:
print(file)
df = df.append(pd.DataFrame({
"Crop type": crop_type,
"Date": dt_rng,
"Band": band,
"Iteration nr": [f_name]*12,
"Band value": vals
}), ignore_index=True)
df["Band value"] /= 250
return df
def create_boxplots(crop_df=None, year=2019):
bands = ["B08", "B11", "NDVI", "ratio"]
if crop_df is None:
crop_df = prep_boxplot(year, bands)
x_dates = crop_df["Date"].dt.strftime("%m-%d-%y").unique()
for crop in set(crop_df["Crop type"]):
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2,figsize=(18,18))
fig.suptitle(crop,y=0.91)
axes = [ax1, ax2, ax3, ax4]
df_m = crop_df[crop_df["Crop type"]==crop]
for i in range(4):
df_m_n = df_m[df_m["Band"]==bands[i]]
sns.boxplot(ax=axes[i],data=df_m_n, x="Date",y="Band value")
axes[i].set_xticklabels(labels=x_dates, rotation=45, ha='right')
axes[i].title.set_text(str(bands[i])+" per month")
axes[i].set_ylim(0,1)
comb = {
0: "none",
1: "corn",
2: "barley",
3: "corn barley",
4: "sugarbeet",
5: "sugarbeet corn",
6: "sugarbeet barley",
7: "sugarbeet barley corn",
8: "potato",
9: "potato corn",
10: "potato barley",
11: "potato barley corn",
12: "potato sugarbeet",
13: "potato sugarbeet corn",
14: "potato sugarbeet barley",
15: "potato sugarbeet barley corn",
16: "soy",
17: "soy corn",
18: "soy barley",
19: "soy barley corn",
20: "soy sugarbeet",
21: "soy sugarbeet corn",
22: "soy sugarbeet barley",
23: "soy sugarbeet barley corn",
24: "soy potato",
25: "soy potato corn",
26: "soy potato barley",
27: "soy potato barley corn",
28: "soy potato sugarbeet",
29: "soy potato sugarbeet corn",
30: "soy potato sugarbeet barley",
31: "soy potato sugarbeet barley corn"
}
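# The keys of `comb` form a 5-bit mask (corn=1, barley=2, sugarbeet=4, potato=8,
# soy=16); each label lists exactly the crops whose bit is set in the key. The
# helper below is an illustrative decoder added for clarity, not part of the
# original classification code.
def decode_crop_mask(value: int) -> str:
    flags = [(16, "soy"), (8, "potato"), (4, "sugarbeet"), (2, "barley"), (1, "corn")]
    names = [name for bit, name in flags if value & bit]
    return " ".join(names) if names else "none"
# e.g. decode_crop_mask(13) == "potato sugarbeet corn" == comb[13]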
col_palette = ['linen',
'chartreuse',
'tomato',
'olivedrab',
'maroon',
'whitesmoke',
'wheat',
'palevioletred',
'darkturquoise',
'tomato',
'thistle',
'teal',
'darkgoldenrod',
'darkmagenta',
'darkorange',
'sienna',
'black',
'silver',
'tan',
'seagreen',
'mediumspringgreen',
'lightseagreen',
'royalblue',
'mediumpurple',
'plum',
'darkcyan',
'moccasin',
'rosybrown',
'gray',
'sandybrown',
'm',
'navy']
def plot_croptypes(fn='./data/total.tif',only_unique_classes=True):
with rasterio.open(fn,mode="r+",crs=rasterio.crs.CRS({"init": "epsg:4326"})) as dataset:
ds = dataset.read(1)
if only_unique_classes:
ds = np.where(np.isin(ds, [1,2,4,8,16]), ds, 0)
keys = np.unique(ds).astype(int)
height_class_labels = [comb[key] for key in comb.keys() if key in keys]
colors = col_palette[0:len(height_class_labels)]
cmap = ListedColormap(colors)
class_bins = [-0.5]+[i+0.5 for i in keys]
norm = BoundaryNorm(class_bins, len(colors))
f, ax = plt.subplots(figsize=(10, 8))
im = ax.imshow(ds, cmap=cmap, norm=norm)
ep.draw_legend(im, titles=height_class_labels)
ax.set(title="Rule-based crop classification")
ax.set_axis_off()
plt.show()
def get_classification_colors():
cmap = ListedColormap(col_palette)
classification_colors = {x:cmap(x) for x in range(0, len(col_palette))}
return classification_colors
def get_trained_model():
year = 2019
bands = ["B08", "B11", "NDVI", "ratio"]
df = pd.DataFrame(columns=["Crop type","Date","Iteration nr"]+bands)
for file in glob.glob('.\\data\\300_*\\*.nc'):
ds_orig = nc.Dataset(file)
dt_rng = pd.date_range("01-01-"+str(year), "01-01-"+str(year+1),freq="MS")
spl = file.split("\\")
f_name = spl[-1].split(".")[0]
crop_type = spl[-2].split("_")[-1]
df_row = {
"Crop type": crop_type[0:12],
"Date": dt_rng[0:12],
"Iteration nr": [f_name]*12,
}
for band in bands:
            try:
                ds = ds_orig[band][:]
            except Exception:
                print("File "+file+" is corrupt. Please remove it from your folder.")
                continue
vals = None
if ds.shape[1:3] == (1,2):
vals = np.mean(ds,axis=2).flatten().tolist()
elif ds.shape[1:3] == (2,1):
vals = np.mean(ds,axis=1).flatten().tolist()
elif ds.shape[1:3] == (1,1):
vals = ds.flatten().tolist()
elif ds.shape[1:3] == (2,2):
vals = np.mean(np.mean(ds,axis=1),axis=1).tolist()
else:
print(file)
df_row[band] = vals[0:12] #[x/250 if x is not None else x for x in vals]
df = df.append(pd.DataFrame(df_row), ignore_index=True)
df = df[pd.notnull(df["B08"])]
X = df[["NDVI","B08","B11","ratio"]]
y = df["Crop type"]
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
clf=RandomForestClassifier(n_estimators=100)
clf.fit(X_train,y_train)
return clf
def prep_df(year, bands):
df = pd.DataFrame(columns=["Crop type","Iteration nr"]+bands)
for file in glob.glob('.\\data\\rf_300_*\\*.nc'):
ds_orig = nc.Dataset(file)
spl = file.split("\\")
f_name = spl[-1].split(".")[0]
crop_type = spl[-2].split("_")[-1]
df_row = {
"Crop type": [crop_type],
"Iteration nr": [f_name],
}
for band in bands:
            try:
                ds = ds_orig[band][:]
            except Exception:
                print("File "+file+" is corrupt. Please remove it from your folder.")
                continue
vals = None
ds[ds.mask] = np.nan
if ds.shape in [(1,2),(2,1),(1,1),(2,2)]:
vals = np.mean(ds)
else:
print(file)
df_row[band] = [vals]
        df = df.append(pd.DataFrame(df_row), ignore_index=True)
import pandas as pd
import numpy as np
import glob
import data.normalise as nm
from data.duplicates import group_duplicates
from data.features import compute_features
nw_features_disc = {
'Time': {
'func': nm.change_time,
'input': 'time'
},
'Date': {
'func': nm.has_dates,
'input': 'message'
},
'Number': {
'func': nm.has_numbers,
'input': 'message'
},
'Decimal': {
'func': nm.has_decimals,
'input': 'message'
},
'URL': {
'func': nm.has_urls,
'input': 'message'
}
}
if __name__ == '__main__':
all_files = glob.glob('data/raw_db' + '/*.csv')
li = []
for filename in all_files:
df = pd.read_csv(filename)
df.columns = ['sender', 'time', 3, 'message']
li.append(df)
data = pd.concat(li, axis=0)
sms = data.iloc[:, [0, 1, 3]]
sms = sms.drop(sms.columns[0], axis=1) # doubt
sms['message'] = sms['message'].apply(nm.remove_newlines)
human = pd.read_csv('data/pruned_db/old_human.csv')
human.columns = ['index', 'sender', 'message', 'label']
    human = pd.concat([human, sms.iloc[:, [0, 1, 2]]])
import numpy as np
import pandas as pd
from numba import njit
from datetime import datetime
import pytest
from itertools import product
from sklearn.model_selection import TimeSeriesSplit
import vectorbt as vbt
from vectorbt.generic import nb
seed = 42
day_dt = np.timedelta64(86400000000000)
df = pd.DataFrame({
'a': [1, 2, 3, 4, np.nan],
'b': [np.nan, 4, 3, 2, 1],
'c': [1, 2, np.nan, 2, 1]
}, index=pd.DatetimeIndex([
datetime(2018, 1, 1),
datetime(2018, 1, 2),
datetime(2018, 1, 3),
datetime(2018, 1, 4),
datetime(2018, 1, 5)
]))
group_by = np.array(['g1', 'g1', 'g2'])
@njit
def i_or_col_pow_nb(i_or_col, x, pow):
return np.power(x, pow)
@njit
def pow_nb(x, pow):
return np.power(x, pow)
@njit
def nanmean_nb(x):
return np.nanmean(x)
@njit
def i_col_nanmean_nb(i, col, x):
return np.nanmean(x)
@njit
def i_nanmean_nb(i, x):
return np.nanmean(x)
@njit
def col_nanmean_nb(col, x):
return np.nanmean(x)
# ############# accessors.py ############# #
class TestAccessors:
def test_shuffle(self):
pd.testing.assert_series_equal(
df['a'].vbt.shuffle(seed=seed),
pd.Series(
np.array([2.0, np.nan, 3.0, 1.0, 4.0]),
index=df['a'].index,
name=df['a'].name
)
)
np.testing.assert_array_equal(
df['a'].vbt.shuffle(seed=seed).values,
nb.shuffle_1d_nb(df['a'].values, seed=seed)
)
pd.testing.assert_frame_equal(
df.vbt.shuffle(seed=seed),
pd.DataFrame(
np.array([
[2., 2., 2.],
[np.nan, 4., 1.],
[3., 3., 2.],
[1., np.nan, 1.],
[4., 1., np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_value",
[-1, 0., np.nan],
)
def test_fillna(self, test_value):
pd.testing.assert_series_equal(df['a'].vbt.fillna(test_value), df['a'].fillna(test_value))
pd.testing.assert_frame_equal(df.vbt.fillna(test_value), df.fillna(test_value))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_bshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.bshift(test_n), df['a'].shift(-test_n))
np.testing.assert_array_equal(
df['a'].vbt.bshift(test_n).values,
nb.bshift_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.bshift(test_n), df.shift(-test_n))
@pytest.mark.parametrize(
"test_n",
[1, 2, 3, 4, 5],
)
def test_fshift(self, test_n):
pd.testing.assert_series_equal(df['a'].vbt.fshift(test_n), df['a'].shift(test_n))
np.testing.assert_array_equal(
df['a'].vbt.fshift(test_n).values,
nb.fshift_1d_nb(df['a'].values, test_n)
)
pd.testing.assert_frame_equal(df.vbt.fshift(test_n), df.shift(test_n))
def test_diff(self):
pd.testing.assert_series_equal(df['a'].vbt.diff(), df['a'].diff())
np.testing.assert_array_equal(df['a'].vbt.diff().values, nb.diff_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.diff(), df.diff())
def test_pct_change(self):
pd.testing.assert_series_equal(df['a'].vbt.pct_change(), df['a'].pct_change(fill_method=None))
np.testing.assert_array_equal(df['a'].vbt.pct_change().values, nb.pct_change_1d_nb(df['a'].values))
pd.testing.assert_frame_equal(df.vbt.pct_change(), df.pct_change(fill_method=None))
def test_ffill(self):
pd.testing.assert_series_equal(df['a'].vbt.ffill(), df['a'].ffill())
pd.testing.assert_frame_equal(df.vbt.ffill(), df.ffill())
def test_product(self):
assert df['a'].vbt.product() == df['a'].product()
np.testing.assert_array_equal(df.vbt.product(), df.product())
def test_cumsum(self):
pd.testing.assert_series_equal(df['a'].vbt.cumsum(), df['a'].cumsum())
pd.testing.assert_frame_equal(df.vbt.cumsum(), df.cumsum())
def test_cumprod(self):
pd.testing.assert_series_equal(df['a'].vbt.cumprod(), df['a'].cumprod())
pd.testing.assert_frame_equal(df.vbt.cumprod(), df.cumprod())
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_min(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_min(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_min(test_window),
df.rolling(test_window).min()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_max(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_max(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_max(test_window),
df.rolling(test_window).max()
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_mean(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_mean(test_window, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.rolling_mean(test_window),
df.rolling(test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [0, 1]))
)
def test_rolling_std(self, test_window, test_minp, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df['a'].rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window, minp=test_minp, ddof=test_ddof),
df.rolling(test_window, min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_std(test_window),
df.rolling(test_window).std()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust",
list(product([1, 2, 3, 4, 5], [1, None], [False, True]))
)
def test_ewm_mean(self, test_window, test_minp, test_adjust):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window, minp=test_minp, adjust=test_adjust),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).mean()
)
pd.testing.assert_frame_equal(
df.vbt.ewm_mean(test_window),
df.ewm(span=test_window).mean()
)
@pytest.mark.parametrize(
"test_window,test_minp,test_adjust,test_ddof",
list(product([1, 2, 3, 4, 5], [1, None], [False, True], [0, 1]))
)
def test_ewm_std(self, test_window, test_minp, test_adjust, test_ddof):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df['a'].ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window, minp=test_minp, adjust=test_adjust, ddof=test_ddof),
df.ewm(span=test_window, min_periods=test_minp, adjust=test_adjust).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.ewm_std(test_window),
df.ewm(span=test_window).std()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_min(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_min(minp=test_minp),
df['a'].expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(minp=test_minp),
df.expanding(min_periods=test_minp).min()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_min(),
df.expanding().min()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_max(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_max(minp=test_minp),
df['a'].expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(minp=test_minp),
df.expanding(min_periods=test_minp).max()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_max(),
df.expanding().max()
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_mean(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_mean(minp=test_minp),
df['a'].expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(minp=test_minp),
df.expanding(min_periods=test_minp).mean()
)
pd.testing.assert_frame_equal(
df.vbt.expanding_mean(),
df.expanding().mean()
)
@pytest.mark.parametrize(
"test_minp,test_ddof",
list(product([1, 3], [0, 1]))
)
def test_expanding_std(self, test_minp, test_ddof):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df['a'].expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(minp=test_minp, ddof=test_ddof),
df.expanding(min_periods=test_minp).std(ddof=test_ddof)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_std(),
df.expanding().std()
)
def test_apply_along_axis(self):
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=0),
df.apply(pow_nb, args=(2,), axis=0, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.apply_along_axis(i_or_col_pow_nb, 2, axis=1),
df.apply(pow_nb, args=(2,), axis=1, raw=True)
)
@pytest.mark.parametrize(
"test_window,test_minp",
list(product([1, 2, 3, 4, 5], [1, None]))
)
def test_rolling_apply(self, test_window, test_minp):
if test_minp is None:
test_minp = test_window
pd.testing.assert_series_equal(
df['a'].vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df['a'].rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb, minp=test_minp),
df.rolling(test_window, min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(test_window, i_col_nanmean_nb),
df.rolling(test_window).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.rolling_apply(3, i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[2.75, 2.75, 2.75],
[np.nan, np.nan, np.nan]
]),
index=df.index,
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_minp",
[1, 3]
)
def test_expanding_apply(self, test_minp):
pd.testing.assert_series_equal(
df['a'].vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df['a'].expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb, minp=test_minp),
df.expanding(min_periods=test_minp).apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_col_nanmean_nb),
df.expanding().apply(nanmean_nb, raw=True)
)
pd.testing.assert_frame_equal(
df.vbt.expanding_apply(i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[np.nan, np.nan, np.nan],
[2.0, 2.0, 2.0],
[2.2857142857142856, 2.2857142857142856, 2.2857142857142856],
[2.4, 2.4, 2.4],
[2.1666666666666665, 2.1666666666666665, 2.1666666666666665]
]),
index=df.index,
columns=df.columns
)
)
def test_groupby_apply(self):
pd.testing.assert_series_equal(
df['a'].vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df['a'].groupby(np.asarray([1, 1, 2, 2, 3])).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_col_nanmean_nb),
df.groupby(np.asarray([1, 1, 2, 2, 3])).agg({
'a': lambda x: nanmean_nb(x.values),
'b': lambda x: nanmean_nb(x.values),
'c': lambda x: nanmean_nb(x.values)
}), # any clean way to do column-wise grouping in pandas?
)
def test_groupby_apply_on_matrix(self):
pd.testing.assert_frame_equal(
df.vbt.groupby_apply(np.asarray([1, 1, 2, 2, 3]), i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2., 2., 2.],
[2.8, 2.8, 2.8],
[1., 1., 1.]
]),
index=pd.Int64Index([1, 2, 3], dtype='int64'),
columns=df.columns
)
)
@pytest.mark.parametrize(
"test_freq",
['1h', '3d', '1w'],
)
def test_resample_apply(self, test_freq):
pd.testing.assert_series_equal(
df['a'].vbt.resample_apply(test_freq, i_col_nanmean_nb),
df['a'].resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply(test_freq, i_col_nanmean_nb),
df.resample(test_freq).apply(lambda x: nanmean_nb(x.values))
)
pd.testing.assert_frame_equal(
df.vbt.resample_apply('3d', i_nanmean_nb, on_matrix=True),
pd.DataFrame(
np.array([
[2.28571429, 2.28571429, 2.28571429],
[2., 2., 2.]
]),
index=pd.DatetimeIndex(['2018-01-01', '2018-01-04'], dtype='datetime64[ns]', freq='3D'),
columns=df.columns
)
)
def test_applymap(self):
@njit
def mult_nb(i, col, x):
return x * 2
pd.testing.assert_series_equal(
df['a'].vbt.applymap(mult_nb),
df['a'].map(lambda x: x * 2)
)
pd.testing.assert_frame_equal(
df.vbt.applymap(mult_nb),
df.applymap(lambda x: x * 2)
)
def test_filter(self):
@njit
def greater_nb(i, col, x):
return x > 2
pd.testing.assert_series_equal(
df['a'].vbt.filter(greater_nb),
df['a'].map(lambda x: x if x > 2 else np.nan)
)
pd.testing.assert_frame_equal(
df.vbt.filter(greater_nb),
df.applymap(lambda x: x if x > 2 else np.nan)
)
def test_apply_and_reduce(self):
@njit
def every_nth_nb(col, a, n):
return a[::n]
@njit
def sum_nb(col, a, b):
return np.nansum(a) + b
assert df['a'].vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)) == \
df['a'].iloc[::2].sum() + 3
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(every_nth_nb, sum_nb, apply_args=(2,), reduce_args=(3,)),
df.iloc[::2].sum().rename('apply_and_reduce') + 3
)
pd.testing.assert_series_equal(
df.vbt.apply_and_reduce(
every_nth_nb, sum_nb, apply_args=(2,),
reduce_args=(3,), wrap_kwargs=dict(time_units=True)),
(df.iloc[::2].sum().rename('apply_and_reduce') + 3) * day_dt
)
def test_reduce(self):
@njit
def sum_nb(col, a):
return np.nansum(a)
assert df['a'].vbt.reduce(sum_nb) == df['a'].sum()
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb),
df.sum().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, wrap_kwargs=dict(time_units=True)),
df.sum().rename('reduce') * day_dt
)
pd.testing.assert_series_equal(
df.vbt.reduce(sum_nb, group_by=group_by),
pd.Series([20.0, 6.0], index=['g1', 'g2']).rename('reduce')
)
@njit
def argmax_nb(col, a):
a = a.copy()
a[np.isnan(a)] = -np.inf
return np.argmax(a)
assert df['a'].vbt.reduce(argmax_nb, to_idx=True) == df['a'].idxmax()
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True),
df.idxmax().rename('reduce')
)
pd.testing.assert_series_equal(
df.vbt.reduce(argmax_nb, to_idx=True, flatten=True, group_by=group_by),
pd.Series(['2018-01-02', '2018-01-02'], dtype='datetime64[ns]', index=['g1', 'g2']).rename('reduce')
)
@njit
def min_and_max_nb(col, a):
out = np.empty(2)
out[0] = np.nanmin(a)
out[1] = np.nanmax(a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.Series([np.nanmin(df['a']), np.nanmax(df['a'])], index=['min', 'max'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
df.apply(lambda x: pd.Series(np.asarray([np.nanmin(x), np.nanmax(x)]), index=['min', 'max']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
min_and_max_nb, to_array=True, group_by=group_by,
wrap_kwargs=dict(name_or_index=['min', 'max'])),
pd.DataFrame([[1.0, 1.0], [4.0, 2.0]], index=['min', 'max'], columns=['g1', 'g2'])
)
@njit
def argmin_and_argmax_nb(col, a):
# nanargmin and nanargmax
out = np.empty(2)
_a = a.copy()
_a[np.isnan(_a)] = np.inf
out[0] = np.argmin(_a)
_a = a.copy()
_a[np.isnan(_a)] = -np.inf
out[1] = np.argmax(_a)
return out
pd.testing.assert_series_equal(
df['a'].vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.Series([df['a'].idxmin(), df['a'].idxmax()], index=['idxmin', 'idxmax'], name='a')
)
pd.testing.assert_frame_equal(
df.vbt.reduce(
argmin_and_argmax_nb, to_idx=True, to_array=True,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
df.apply(lambda x: pd.Series(np.asarray([x.idxmin(), x.idxmax()]), index=['idxmin', 'idxmax']), axis=0)
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='C', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-02', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.reduce(argmin_and_argmax_nb, to_idx=True, to_array=True,
flatten=True, order='F', group_by=group_by,
wrap_kwargs=dict(name_or_index=['idxmin', 'idxmax'])),
pd.DataFrame([['2018-01-01', '2018-01-01'], ['2018-01-04', '2018-01-02']],
dtype='datetime64[ns]', index=['idxmin', 'idxmax'], columns=['g1', 'g2'])
)
def test_squeeze_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.squeeze_grouped(i_col_nanmean_nb, group_by=group_by),
pd.DataFrame([
[1.0, 1.0],
[3.0, 2.0],
[3.0, np.nan],
[3.0, 2.0],
[1.0, 1.0]
], index=df.index, columns=['g1', 'g2'])
)
def test_flatten_grouped(self):
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='C'),
pd.DataFrame([
[1.0, 1.0],
[np.nan, np.nan],
[2.0, 2.0],
[4.0, np.nan],
[3.0, np.nan],
[3.0, np.nan],
[4.0, 2.0],
[2.0, np.nan],
[np.nan, 1.0],
[1.0, np.nan]
], index=np.repeat(df.index, 2), columns=['g1', 'g2'])
)
pd.testing.assert_frame_equal(
df.vbt.flatten_grouped(group_by=group_by, order='F'),
pd.DataFrame([
[1.0, 1.0],
[2.0, 2.0],
[3.0, np.nan],
[4.0, 2.0],
[np.nan, 1.0],
[np.nan, np.nan],
[4.0, np.nan],
[3.0, np.nan],
[2.0, np.nan],
[1.0, np.nan]
], index=np.tile(df.index, 2), columns=['g1', 'g2'])
)
@pytest.mark.parametrize(
"test_name,test_func,test_func_nb",
[
('min', lambda x, **kwargs: x.min(**kwargs), nb.nanmin_nb),
('max', lambda x, **kwargs: x.max(**kwargs), nb.nanmax_nb),
('mean', lambda x, **kwargs: x.mean(**kwargs), nb.nanmean_nb),
('median', lambda x, **kwargs: x.median(**kwargs), nb.nanmedian_nb),
('std', lambda x, **kwargs: x.std(**kwargs, ddof=0), nb.nanstd_nb),
('count', lambda x, **kwargs: x.count(**kwargs), nb.nancnt_nb),
('sum', lambda x, **kwargs: x.sum(**kwargs), nb.nansum_nb)
],
)
def test_funcs(self, test_name, test_func, test_func_nb):
# numeric
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack()),
test_func(df['c'])
], index=['g1', 'g2']).rename(test_name)
)
np.testing.assert_array_equal(test_func(df).values, test_func_nb(df.values))
pd.testing.assert_series_equal(
test_func(df.vbt, wrap_kwargs=dict(time_units=True)),
test_func(df).rename(test_name) * day_dt
)
# boolean
bool_ts = df == df
assert test_func(bool_ts['a'].vbt) == test_func(bool_ts['a'])
pd.testing.assert_series_equal(
test_func(bool_ts.vbt),
test_func(bool_ts).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(bool_ts.vbt, wrap_kwargs=dict(time_units=True)),
test_func(bool_ts).rename(test_name) * day_dt
)
@pytest.mark.parametrize(
"test_name,test_func",
[
('idxmin', lambda x, **kwargs: x.idxmin(**kwargs)),
('idxmax', lambda x, **kwargs: x.idxmax(**kwargs))
],
)
def test_arg_funcs(self, test_name, test_func):
assert test_func(df['a'].vbt) == test_func(df['a'])
pd.testing.assert_series_equal(
test_func(df.vbt),
test_func(df).rename(test_name)
)
pd.testing.assert_series_equal(
test_func(df.vbt, group_by=group_by),
pd.Series([
test_func(df[['a', 'b']].stack())[0],
test_func(df['c'])
], index=['g1', 'g2'], dtype='datetime64[ns]').rename(test_name)
)
def test_describe(self):
pd.testing.assert_series_equal(
df['a'].vbt.describe(),
df['a'].describe()
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=None),
df.describe(percentiles=None)
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=[]),
df.describe(percentiles=[])
)
test_against = df.describe(percentiles=np.arange(0, 1, 0.1))
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1)),
test_against
)
pd.testing.assert_frame_equal(
df.vbt.describe(percentiles=np.arange(0, 1, 0.1), group_by=group_by),
pd.DataFrame({
'g1': df[['a', 'b']].stack().describe(percentiles=np.arange(0, 1, 0.1)).values,
'g2': df['c'].describe(percentiles=np.arange(0, 1, 0.1)).values
}, index=test_against.index)
)
def test_drawdown(self):
pd.testing.assert_series_equal(
df['a'].vbt.drawdown(),
df['a'] / df['a'].expanding().max() - 1
)
pd.testing.assert_frame_equal(
df.vbt.drawdown(),
df / df.expanding().max() - 1
)
def test_drawdowns(self):
assert type(df['a'].vbt.drawdowns) is vbt.Drawdowns
assert df['a'].vbt.drawdowns.wrapper.freq == df['a'].vbt.wrapper.freq
assert df['a'].vbt.drawdowns.wrapper.ndim == df['a'].ndim
assert df.vbt.drawdowns.wrapper.ndim == df.ndim
def test_to_mapped_array(self):
np.testing.assert_array_equal(
df.vbt.to_mapped_array().values,
np.array([1., 2., 3., 4., 4., 3., 2., 1., 1., 2., 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().col_arr,
np.array([0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array().idx_arr,
np.array([0, 1, 2, 3, 1, 2, 3, 4, 0, 1, 3, 4])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).values,
np.array([1., 2., 3., 4., np.nan, np.nan, 4., 3., 2., 1., 1., 2., np.nan, 2., 1.])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).col_arr,
np.array([0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2])
)
np.testing.assert_array_equal(
df.vbt.to_mapped_array(dropna=False).idx_arr,
np.array([0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
)
def test_zscore(self):
pd.testing.assert_series_equal(
df['a'].vbt.zscore(),
(df['a'] - df['a'].mean()) / df['a'].std(ddof=0)
)
pd.testing.assert_frame_equal(
df.vbt.zscore(),
(df - df.mean()) / df.std(ddof=0)
)
def test_split(self):
splitter = TimeSeriesSplit(n_splits=2)
(train_df, train_indexes), (test_df, test_indexes) = df['a'].vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, 1.0],
[2.0, 2.0],
[3.0, 3.0],
[np.nan, 4.0]
]),
index=pd.RangeIndex(start=0, stop=4, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03'],
dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04'],
dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
train_indexes[i],
target[i]
)
pd.testing.assert_frame_equal(
test_df,
pd.DataFrame(
np.array([
[4.0, np.nan]
]),
index=pd.RangeIndex(start=0, stop=1, step=1),
columns=pd.Int64Index([0, 1], dtype='int64', name='split_idx')
)
)
target = [
pd.DatetimeIndex(['2018-01-04'], dtype='datetime64[ns]', name='split_0', freq=None),
pd.DatetimeIndex(['2018-01-05'], dtype='datetime64[ns]', name='split_1', freq=None)
]
for i in range(len(target)):
pd.testing.assert_index_equal(
test_indexes[i],
target[i]
)
(train_df, train_indexes), (test_df, test_indexes) = df.vbt.split(splitter)
pd.testing.assert_frame_equal(
train_df,
pd.DataFrame(
np.array([
[1.0, np.nan, 1.0, 1.0, np.nan, 1.0],
[2.0, 4.0, 2.0, 2.0, 4.0, 2.0],
[3.0, 3.0, np.nan, 3.0, 3.0, np.nan],
[np.nan, np.nan, np.nan, 4.0, 2.0, 2.0]
]),
                index=pd.RangeIndex(start=0, stop=4, step=1)
#!/usr/bin/env python
# coding: utf-8
# In[1]:
import requests
import json
import pandas as pd
import numpy as np
from pandas.io.json import json_normalize
import folium
import matplotlib.pyplot as plt
from folium.plugins import MarkerCluster
import warnings
warnings.filterwarnings(action='ignore')
# In[2]:
address = pd.DataFrame()
# https://scikit-learn.org/stable/auto_examples/inspection/plot_permutation_importance_multicollinear.html#sphx-glr-auto-examples-inspection-plot-permutation-importance-multicollinear-py
# https://orbi.uliege.be/bitstream/2268/155642/1/louppe13.pdf
# https://proceedings.neurips.cc/paper/2019/file/702cafa3bb4c9c86e4a3b6834b45aedd-Paper.pdf
# https://indico.cern.ch/event/443478/contributions/1098668/attachments/1157598/1664920/slides.pdf
import time
import warnings
from collections import defaultdict
from typing import Callable, Tuple
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from scipy.cluster import hierarchy
from scipy.spatial.distance import squareform
from scipy.stats import spearmanr
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier
from sklearn.inspection import permutation_importance
from sklearn.model_selection import cross_val_score, train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.tree import DecisionTreeClassifier
def evaluate_cv(model, dataset_x, dataset_y, cv=None):
start_time = time.time()
scores = cross_val_score(model, dataset_x, dataset_y, cv=cv)
elapsed_time = time.time() - start_time
# print(f"mean CV score: {np.mean(scores):.3f} (in {elapsed_time:.3f} seconds)")
return scores
def check_class_imbalance(dataset_y):
_, occurrences = np.unique(dataset_y, return_counts=True)
print(f"{occurrences = }")
# highest fraction of class over samples
imbalance = np.max(occurrences / dataset_y.size)
print(f"{np.max(occurrences / dataset_y.size) = :.4f}")
print(f"{np.min(occurrences / dataset_y.size) = :.4f}")
print(f". . . . . . . . . . . . 1 / #classes = {1/(np.max(dataset_y)+1):.4f}")
return imbalance
def compare_score_imbalance(score: float, imbalance: float):
if score < 1.5 * imbalance:
warnings.warn(
f"{score = :.3f} is below {1.5*imbalance:.3f}, results may not be "
f"indicative (class_{imbalance = :.3f})"
)
else:
print(f"{score = :.3f} ({imbalance = :.3f})")
def check_correlation(dataset_x):
for i in range(dataset_x.shape[1]):
for j in range(i + 1, dataset_x.shape[1]):
coeff = np.corrcoef(dataset_x[:, i], dataset_x[:, j])[0, 1]
if np.abs(coeff) > 0.8:
# dataset_x[:, j] = np.random.rand(dataset_x.shape[0])
print(f"{i=} {j=} {coeff=}")
def new_model(
random_state,
n_estimators: int = 1000,
max_features: int = None,
max_depth: int = None,
) -> ExtraTreesClassifier:
return ExtraTreesClassifier(
n_estimators=n_estimators,
max_features=max_features,
max_depth=max_depth,
random_state=random_state,
)
def get_feature_idx(dataset_x, dataset_y, start=(), random_state=48):
cv = 5
def get_score_partial_features(indices: tuple):
partial_x = dataset_x[:, indices]
# model = new_model(random_state)
# model = new_model(random_state=random_state)
model = ExtraTreesClassifier(random_state=random_state)
return indices[-1], np.mean(evaluate_cv(model, partial_x, dataset_y, cv))
delayed_score = joblib.delayed(get_score_partial_features)
last_score = 0.0
selected = tuple(start)
candidates = list(set(range(dataset_x.shape[1])) - set(selected))
while True:
results = joblib.Parallel(n_jobs=-1)(
delayed_score(selected + (c,)) for c in candidates
)
best_idx, best_score = results[0]
for idx_, score_ in results[1:]:
if score_ > best_score:
best_score = score_
best_idx = idx_
if best_score - last_score < 0.01:
break
selected += (best_idx,)
candidates.remove(best_idx)
print(f"{best_score=:.3f} {selected=}")
last_score = best_score
return selected
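# --- Example (sketch, not part of the original module) ------------------------
# get_feature_idx() performs greedy forward selection: it keeps adding the single
# feature that raises the mean CV score the most and stops once the improvement
# drops below 0.01. The synthetic dataset below only illustrates the call.
def _example_get_feature_idx():
    from sklearn.datasets import make_classification
    ds_x, ds_y = make_classification(n_samples=500, n_features=20, n_informative=5, random_state=0)
    selected = get_feature_idx(ds_x, ds_y, start=(), random_state=48)
    return selected  # tuple of column indices, in the order they were picked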
def add_input_noise(dataset_x: np.ndarray, rel_scale: float):
scale = rel_scale * np.mean(np.abs(dataset_x), axis=1)
# numpy needs the first axis to be the same as the scale
size = dataset_x.shape[::-1]
noise = np.random.normal(scale=scale, size=size).T
return dataset_x + noise
def do_plot(dataset_x, dataset_y, stratify_classes=True, random_state=48):
model = new_model(random_state)
cv = 10
# check_correlation(dataset_x)
# imbalance = check_class_imbalance(dataset_y)
# split dataset
stratify = dataset_y if stratify_classes else None
X_train, X_test, Y_train, Y_test = train_test_split(
dataset_x,
dataset_y,
test_size=0.33,
random_state=random_state,
stratify=stratify,
)
# cv_scores = evaluate_cv(model, dataset_x, dataset_y, cv)
# print(
# f"{np.min(cv_scores)=}",
# f"{np.mean(cv_scores)=}",
# f"{np.median(cv_scores)=}",
# f"{np.max(cv_scores)=}",
# )
# compare_score_imbalance(np.mean(cv_scores), imbalance)
model.fit(X_train, Y_train)
ts_score = model.score(X_test, Y_test)
print(f"{ts_score=}")
feature_names = list(map(str, range(dataset_x.shape[1])))
# find the most important features (see sklearn doc)
# result = permutation_importance(
# model, X_train, Y_train, n_repeats=10, random_state=42
# )
# perm_sorted_idx = result.importances_mean.argsort()
# tree_importance_sorted_idx = np.argsort(model.feature_importances_)
# tree_indices = np.arange(0, len(model.feature_importances_)) + 0.5
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
# ax1.barh(
# tree_indices, model.feature_importances_[tree_importance_sorted_idx], height=0.7
# )
# ax1.set_yticks(tree_indices)
# ax1.set_yticklabels([feature_names[i] for i in tree_importance_sorted_idx])
# ax1.set_ylim((0, len(model.feature_importances_)))
# ax2.boxplot(
# result.importances[perm_sorted_idx].T,
# vert=False,
# labels=[feature_names[i] for i in perm_sorted_idx],
# )
# fig.tight_layout()
# plt.show()
# find the correlated features
# fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 8))
fig, ax1 = plt.subplots(1, 1, figsize=(12, 8))
corr = spearmanr(dataset_x).correlation
# Ensure the correlation matrix is symmetric
corr = (corr + corr.T) / 2
np.fill_diagonal(corr, 1)
# We convert the correlation matrix to a distance matrix before performing
# hierarchical clustering using Ward's linkage.
distance_matrix = 1 - np.abs(corr)
dist_linkage = hierarchy.ward(squareform(distance_matrix))
dendro = hierarchy.dendrogram(
dist_linkage, labels=feature_names, ax=ax1, leaf_rotation=90
)
# dendro_idx = np.arange(0, len(dendro["ivl"]))
# ax2.imshow(corr[dendro["leaves"], :][:, dendro["leaves"]])
# ax2.set_xticks(dendro_idx)
# ax2.set_yticks(dendro_idx)
# ax2.set_xticklabels(dendro["ivl"], rotation="vertical")
# ax2.set_yticklabels(dendro["ivl"])
fig.tight_layout()
plt.show()
# for threshold in [3.5, 2.5, 1.5, 1.0, 0.8, 0.6, 0.4, 0.2, 0.1, 0.05]:
for threshold in [0.4]:
cluster_ids = hierarchy.fcluster(dist_linkage, threshold, criterion="distance")
cluster_id_to_feature_ids = defaultdict(list)
for idx, cluster_id in enumerate(cluster_ids):
cluster_id_to_feature_ids[cluster_id].append(idx)
selected_features = [v[0] for v in cluster_id_to_feature_ids.values()]
X_train_sel = X_train[:, selected_features]
X_test_sel = X_test[:, selected_features]
clf_sel = new_model(random_state=random_state)
clf_sel.fit(X_train_sel, Y_train)
score = clf_sel.score(X_test_sel, Y_test)
print(f"{threshold=:.3f} {score=:.3f} {len(selected_features)=}")
print(f"{selected_features=}")
def get_mdi_importance(ds_x, ds_y, model):
model.fit(ds_x, ds_y)
try:
importances = model.feature_importances_
if hasattr(model, "estimators_"):
std = np.std(
[tree.feature_importances_ for tree in model.estimators_], axis=0
)
else:
std = np.full_like(importances, np.nan)
return importances, std
except AttributeError as _:
return None
def get_permutation_importance(ds_x, ds_y, model, random_state):
# permutation importance
X_train, X_test, Y_train, Y_test = train_test_split(
ds_x, ds_y, test_size=0.33, random_state=random_state
)
model.fit(X_train, Y_train)
result = permutation_importance(
model, X_test, Y_test, random_state=random_state, n_repeats=10, n_jobs=-1,
)
return (result.importances_mean, result.importances_std)
def get_feature_importances(ds_x, ds_y, model_fn: Callable, random_state):
return (
get_permutation_importance(ds_x, ds_y, model_fn(random_state), random_state),
get_mdi_importance(ds_x, ds_y, model_fn(random_state)),
)
def study_model(ds_x, ds_y, random_state):
model_builders = [
lambda: ExtraTreesClassifier(
n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
),
lambda: RandomForestClassifier(
n_estimators=1000, max_features=None, n_jobs=-1, random_state=random_state
),
lambda: MLPClassifier(hidden_layer_sizes=(128, 128), random_state=random_state),
]
df = pd.DataFrame()
# TODO
def add_features(
dataset_x: np.ndarray,
*,
n_comb_lin_droppout: int = 0,
n_noise: int = 0,
n_lin_comb: int = 0,
n_redundant: int = 0,
) -> np.ndarray:
"""add some correlated or noisy features to a dataset.
Args:
dataset_x (np.ndarray): original dataset
n_comb_lin_droppout (int): first apply a 30% dropout to the dataset and
then apply a linear combination with a small noise (scale=0.1*std)
n_noise (int): number of gaussian noise features to add (scale=1.0)
n_lin_comb (int): linear combination of the features with added
gaussian noise (scale=0.1*std) to add
n_redundant (int): number of redundant features to add with a gaussian
noise (scale=0.1*std)
Returns:
np.ndarray: the dataset, columns are added in order, at the right edge
"""
def _dropout() -> np.ndarray:
"compute one correlated noisy feature column"
weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
dropout = np.copy(dataset_x)
dropout[np.random.rand(*dropout.shape) < 0.3] = 0
feature = np.dot(dropout, weights)
return feature + 0.1 * np.std(feature) * _noise()
def _noise() -> np.ndarray:
"compute one complete noise feature column"
return np.random.normal(size=(dataset_x.shape[0], 1))
def _lin_comb() -> np.ndarray:
weights = np.random.normal(loc=0, scale=1, size=(dataset_x.shape[1], 1))
feature = np.dot(dataset_x, weights)
return feature + 0.1 * np.std(feature) * _noise()
def _redundant() -> np.ndarray:
idx = np.random.randint(dataset_x.shape[1])
feature = dataset_x[:, idx : idx + 1]
return feature + 0.1 * np.std(feature) * _noise()
feature_columns = [dataset_x]
feature_columns.extend(_dropout() for _ in range(n_comb_lin_droppout))
feature_columns.extend(_noise() for _ in range(n_noise))
feature_columns.extend(_lin_comb() for _ in range(n_lin_comb))
feature_columns.extend(_redundant() for _ in range(n_redundant))
merged = np.concatenate(feature_columns, axis=1)
assert (
dataset_x.shape[0] == merged.shape[0]
), "invalid number of objects after transformation"
assert (
dataset_x.shape[1] + n_comb_lin_droppout + n_noise + n_lin_comb + n_redundant
== merged.shape[1]
), "invalid number of features after transformation"
return merged
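# --- Example (sketch, not part of the original module) ------------------------
# Illustrates augmenting a dataset with correlated / noisy columns before an
# importance study; the shapes below are arbitrary assumptions.
def _example_add_features():
    base_x = np.random.normal(size=(300, 8))     # 300 objects, 8 "real" features
    augmented = add_features(
        base_x,
        n_comb_lin_droppout=2,   # dropout + linear-combination features
        n_noise=3,               # pure gaussian noise features
        n_lin_comb=2,            # noisy linear combinations
        n_redundant=1,           # near-copies of existing columns
    )
    assert augmented.shape == (300, 8 + 2 + 3 + 2 + 1)
    return augmented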
def _compare_mdi_perm(
dataset_x, dataset_y, feature_names, model_fn, random_state
) -> Tuple[pd.DataFrame, pd.DataFrame]:
# two series: one for MDI, one for MDA
(mda_mean, mda_std), (mdi_mean, mdi_std) = get_feature_importances(
dataset_x, dataset_y, model_fn, random_state
)
    mean = pd.DataFrame()
"""
This is the dashboard of CEA
"""
from __future__ import division
from __future__ import print_function
import json
import os
import pandas as pd
import numpy as np
import cea.config
import cea.inputlocator
from cea.plots.optimization.cost_analysis_curve_centralized import cost_analysis_curve_centralized
from cea.plots.optimization.pareto_capacity_installed import pareto_capacity_installed
from cea.plots.optimization.pareto_curve import pareto_curve
from cea.optimization.lca_calculations import lca_calculations
from cea.plots.optimization.pareto_curve_over_generations import pareto_curve_over_generations
from cea.analysis.multicriteria.optimization_post_processing.individual_configuration import supply_system_configuration
from cea.technologies.solar.photovoltaic import calc_Cinv_pv
from cea.optimization.constants import SIZING_MARGIN
from cea.analysis.multicriteria.optimization_post_processing.locating_individuals_in_generation_script import locating_individuals_in_generation_script
from math import ceil, log
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, Architecture and Building Systems - ETH Zurich"
__credits__ = ["<NAME>", "<NAME>"]
__license__ = "MIT"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Production"
def plots_main(locator, config):
# local variables
generation = config.plots_optimization.generation
categories = config.plots_optimization.categories
scenario = config.scenario
type_of_network = config.plots_optimization.network_type
# generate plots
category = "optimization-overview"
if not os.path.exists(locator.get_address_of_individuals_of_a_generation(generation)):
data_address = locating_individuals_in_generation_script(generation, locator)
else:
data_address = pd.read_csv(locator.get_address_of_individuals_of_a_generation(generation))
# initialize class
plots = Plots(locator, generation, config, type_of_network, data_address)
if "pareto_curve" in categories:
plots.pareto_curve_for_one_generation(category)
if "system_sizes" in categories:
plots.cost_analysis_central_decentral(category)
if config.plots_optimization.network_type == 'DH':
plots.comparison_capacity_installed_heating_supply_system_one_generation(category)
if config.plots_optimization.network_type == 'DC':
plots.comparison_capacity_installed_cooling_supply_system_one_generation(category)
if "costs_analysis" in categories:
plots.cost_analysis_central_decentral(category)
if config.plots_optimization.network_type == 'DH':
plots.comparison_capex_opex_heating_supply_system_for_one_generation_per_production_unit(category)
if config.plots_optimization.network_type == 'DC':
plots.comparison_capex_opex_cooling_supply_system_for_one_generation_per_production_unit(category)
return
class Plots():
def __init__(self, locator, generation, config, type_of_network, data_address):
# local variables
self.locator = locator
self.config = config
self.generation = generation
self.network_type = type_of_network
self.final_generation = [generation]
self.data_address = data_address
# fields of loads in the systems of heating, cooling and electricity
self.analysis_fields_cost_cooling_centralized = ["Capex_a_ACH",
"Capex_a_CCGT",
"Capex_a_CT",
"Capex_a_Tank",
"Capex_a_VCC",
"Capex_a_VCC_backup",
"Capex_a_pump",
"Capex_a_PV",
"Network_costs",
"Substation_costs",
"Opex_var_ACH",
"Opex_var_CCGT",
"Opex_var_CT",
"Opex_var_Lake",
"Opex_var_VCC",
"Opex_var_VCC_backup",
"Opex_var_pumps",
"Opex_var_PV",
"Electricitycosts_for_appliances",
"Electricitycosts_for_hotwater"]
self.analysis_fields_cost_central_decentral = ["Capex_Centralized",
"Capex_Decentralized",
"Opex_Centralized",
"Opex_Decentralized"]
self.analysis_fields_cost_heating_centralized = ["Capex_SC",
"Capex_PVT",
"Capex_furnace",
"Capex_Boiler_Total",
"Capex_CHP",
"Capex_Lake",
"Capex_Sewage",
"Capex_pump",
"Opex_HP_Sewage",
"Opex_HP_Lake",
"Opex_GHP",
"Opex_CHP_Total",
"Opex_Furnace_Total",
"Opex_Boiler_Total",
"Electricity_Costs"]
self.analysis_fields_individual_heating = ['Base_boiler_BG_capacity_W', 'Base_boiler_NG_capacity_W', 'CHP_BG_capacity_W',
'CHP_NG_capacity_W', 'Furnace_dry_capacity_W', 'Furnace_wet_capacity_W',
'GHP_capacity_W', 'HP_Lake_capacity_W', 'HP_Sewage_capacity_W',
'PVT_capacity_W', 'PV_capacity_W', 'Peak_boiler_BG_capacity_W',
'Peak_boiler_NG_capacity_W', 'SC_ET_capacity_W', 'SC_FP_capacity_W',
'Disconnected_Boiler_BG_capacity_W',
'Disconnected_Boiler_NG_capacity_W',
'Disconnected_FC_capacity_W',
'Disconnected_GHP_capacity_W']
self.analysis_fields_individual_cooling = ['Lake_kW', 'VCC_LT_kW', 'VCC_HT_kW', 'single_effect_ACH_LT_kW',
'single_effect_ACH_HT_kW', 'DX_kW', 'CHP_CCGT_thermal_kW', 'Storage_thermal_kW']
self.data_processed = self.preprocessing_generations_data()
self.data_processed_cost_centralized = self.preprocessing_final_generation_data_cost_centralized(self.locator,
self.data_processed,
self.config, self.data_address, self.generation)
self.data_processed_capacities = self.preprocessing_capacities_data(self.locator, self.data_processed, self.generation, self.network_type, config, self.data_address)
def preprocessing_generations_data(self):
generation = self.final_generation[0]
data_processed = []
with open(self.locator.get_optimization_checkpoint(generation), "rb") as fp:
data = json.load(fp)
# get lists of data for performance values of the population
costs_Mio = [round(objectives[0] / 1000000, 2) for objectives in
data['population_fitness']] # convert to millions
individual_names = ['ind' + str(i) for i in range(len(costs_Mio))]
individual_barcode = [[str(ind) if type(ind) == float else str(ind) for ind in
individual] for individual in data['population']]
def_individual_barcode = pd.DataFrame({'Name': individual_names,
'individual_barcode': individual_barcode}).set_index("Name")
data_processed = {'individual_barcode': def_individual_barcode}
return data_processed
def preprocessing_capacities_data(self, locator, data_generation, generation, network_type, config, data_address):
column_names = ['Lake_kW', 'VCC_LT_kW', 'VCC_HT_kW', 'single_effect_ACH_LT_kW',
'single_effect_ACH_HT_kW', 'DX_kW', 'CHP_CCGT_thermal_kW',
'Storage_thermal_kW', 'CT_kW', 'Buildings Connected Share']
individual_index = data_generation['individual_barcode'].index.values
capacities_of_generation =pd.DataFrame(np.zeros([len(individual_index), len(column_names)]), columns=column_names)
for i, ind in enumerate(individual_index):
data_address_individual = data_address[data_address['individual_list'] == ind]
generation_pointer = data_address_individual['generation_number_address'].values[0] # points to the correct file to be referenced from optimization folders
individual_pointer = data_address_individual['individual_number_address'].values[0]
district_supply_sys, building_connectivity = supply_system_configuration(generation_pointer, individual_pointer, locator, network_type, config)
for name in column_names:
                if name == 'Buildings Connected Share':
connected_buildings = len(building_connectivity.loc[building_connectivity.Type == "CENTRALIZED"])
total_buildings = connected_buildings + len(
building_connectivity.loc[building_connectivity.Type == "DECENTRALIZED"])
capacities_of_generation.iloc[i][name] = np.float(connected_buildings * 100 / total_buildings)
else:
capacities_of_generation.iloc[i][name] = district_supply_sys[name].sum()
capacities_of_generation['indiv'] = individual_index
capacities_of_generation.set_index('indiv', inplace=True)
return {'capacities_of_final_generation': capacities_of_generation}
def preprocessing_final_generation_data_cost_centralized(self, locator, data_raw, config, data_address, generation):
total_demand = pd.read_csv(locator.get_total_demand())
building_names = total_demand.Name.values
df_all_generations = pd.read_csv(locator.get_optimization_all_individuals())
preprocessing_costs = pd.read_csv(locator.get_preprocessing_costs())
# The current structure of CEA has the following columns saved, in future, this will be slightly changed and
# correspondingly these columns_of_saved_files needs to be changed
columns_of_saved_files = ['CHP/Furnace', 'CHP/Furnace Share', 'Base Boiler',
'Base Boiler Share', 'Peak Boiler', 'Peak Boiler Share',
'Heating Lake', 'Heating Lake Share', 'Heating Sewage', 'Heating Sewage Share', 'GHP',
'GHP Share',
'Data Centre', 'Compressed Air', 'PV', 'PV Area Share', 'PVT', 'PVT Area Share', 'SC_ET',
'SC_ET Area Share', 'SC_FP', 'SC_FP Area Share', 'DHN Temperature', 'DHN unit configuration',
'Lake Cooling', 'Lake Cooling Share', 'VCC Cooling', 'VCC Cooling Share',
'Absorption Chiller', 'Absorption Chiller Share', 'Storage', 'Storage Share',
'DCN Temperature', 'DCN unit configuration']
for i in building_names: # DHN
columns_of_saved_files.append(str(i) + ' DHN')
for i in building_names: # DCN
columns_of_saved_files.append(str(i) + ' DCN')
individual_index = data_raw['individual_barcode'].index.values
if config.plots_optimization.network_type == 'DH':
data_activation_path = os.path.join(
locator.get_optimization_slave_investment_cost_detailed(1, 1))
            df_heating_costs = pd.read_csv(data_activation_path)
import pandas as pd
import streamlit as st
import yfinance as yf
@st.experimental_memo(max_entries=1000, show_spinner=False)
def get_asset_splits(ticker, cache_date):
return yf.Ticker(ticker).actions.loc[:, 'Stock Splits']
@st.experimental_memo(max_entries=50, show_spinner=False)
def get_historical_prices(tickers, start, cache_date):
assert len(tickers) == len(set(tickers))
# fix for penny sterling edgecase
if 'GBXUSD=X' in tickers:
index = tickers.index('GBXUSD=X')
if 'GBPUSD=X' not in tickers:
tickers[index] = 'GBPUSD=X'
return (
yf.download(tickers, start=start)
.loc[:, 'Close']
.assign(**{'GBXUSD=X': lambda x: x['GBPUSD=X'] / 100})
.drop(columns='GBPUSD=X')
)
else:
del tickers[index]
return (
yf.download(tickers, start=start)
.loc[:, 'Close']
.assign(**{'GBXUSD=X': lambda x: x['GBPUSD=X'] / 100})
)
return yf.download(tickers, start=start).loc[:, 'Close']
def correct_asset_amount_affected_by_split(df: pd.DataFrame, ticker_types: pd.Series):
df = df.copy()
for ticker in df.ticker.unique():
if ticker_types[ticker] == 'CRYPTO':
continue
# there can only be one split a day, so cache_date ensures we only download split data once a day
        splits = get_asset_splits(ticker, cache_date=str(pd.Timestamp.now().date()))
#%% [markdown] #--------------------------------------------------
## Equity Premium and Machine
#%% #--------------------------------------------------
import warnings
import matplotlib.pyplot as plt
plt.rcParams['figure.figsize'] = [10, 5]
import math
import time
import datetime
import pandas as pd
import numpy as np
import matplotlib as mpl
mpl.rcParams['patch.force_edgecolor'] = True
import seaborn as sns
sns.set()
from pandas.tseries.offsets import MonthEnd # To Determine the End of the Corresponding Month
import sys # To caclulate memory usage
import os
dir = os.getcwd()+'\\notebooks\\first_analysis'
# dir = 'C:\Research\Google Search Volume'
#dir = os.path.dirname(os.path.abspath(__file__))
#os.path.realpath(sys.argv[0])
#'E:/Research/Equity Premium and Machine Learning'
#dir = "C:/Users/vonNe/Google Drive/Data Science/Projects/Equity Premium and Machine Learning"
#dir = 'D:/Ravenpack'
os.chdir(dir)
os.makedirs(dir + '\\temp', exist_ok = True)
os.makedirs(dir + '\\out\\temp', exist_ok = True)
os.makedirs(dir + '\\in', exist_ok = True)
# Cross-validation Parameter
K = 10
# Share of Sample as Test
TsizeInv = 15
test_size= 1/TsizeInv
# Add interactions or not
Poly = 1
# Period
Period = 1974
#Test New Line
#%% #--------------------------------------------------
df = pd.read_csv('in/rapach_2013.csv', na_values = ['NaN'])
df.rename( index=str, columns={"date": "ym"}, inplace=True)
df['date'] = pd.to_datetime(df['ym'], format='%Y%m')
"""For training the comment spam classifier.
"""
import os
import numpy
from pandas import DataFrame
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.pipeline import Pipeline
from sklearn.model_selection import KFold
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.externals import joblib
from pygameweb.db import _get_session
from pygameweb.comment.models import CommentPost
def get_comment_data():
trans, session = _get_session()
spam = session.query(CommentPost).filter(CommentPost.is_spam == True).all()
ham = session.query(CommentPost).filter(CommentPost.is_spam != True).all()
return [(spam, 'spam'), (ham, 'ham')]
def build_df(comments, classification):
"""Creates a dataframe from the comments.
:param classification: either 'spam', or 'ham'.
"""
rows = []
index = []
for comment in comments:
rows.append({'text': comment.message, 'class': classification})
index.append(comment.id)
return DataFrame(rows, index=index)
def process_comment_data(comment_data):
    data = DataFrame({'text': [], 'class': []})
from __future__ import print_function
try:
input = raw_input
except NameError:
pass
import argparse
import pc_lib_api
import pc_lib_general
import json
import pandas
import time
import sys
from datetime import datetime, date
# --Execution Block-- #
# --Parse command line arguments-- #
parser = argparse.ArgumentParser(prog='rltoolbox')
parser.add_argument(
'-u',
'--username',
type=str,
help='*Required* - Prisma Cloud API Access Key ID that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-p',
'--password',
type=str,
help='*Required* - Prisma Cloud API Secret Key that you want to set to access your Prisma Cloud account.')
parser.add_argument(
'-url',
'--uiurl',
type=str,
help='*Required* - Base URL used in the UI for connecting to Prisma Cloud. '
'Formatted as app.prismacloud.io or app2.prismacloud.io or app.eu.prismacloud.io, etc. '
'You can also input the api version of the URL if you know it and it will be passed through.')
parser.add_argument(
'-y',
'--yes',
action='store_true',
help='(Optional) - Override user input for verification (auto answer for yes).')
parser.add_argument(
'--detailed',
action='store_true',
help='(Optional) - Detailed alerts response.')
parser.add_argument(
'--matrixmode',
action='store_true',
help='(Optional) - Print out JSON responses.')
parser.add_argument(
'-fas',
'--alertstatus',
type=str,
help='(Optional) - Filter - Alert Status.')
parser.add_argument(
'-aid',
'--alertid',
type=str,
help='(Optional) - Filter - Alert ID.')
parser.add_argument(
'-fpt',
'--policytype',
type=str,
help='(Optional) - Filter - Policy Type.')
parser.add_argument(
'-fpcs',
'--policycomplianceStandard',
type=str,
help='(Optional) - Filter - Policy Compliance Standard.')
parser.add_argument(
'-fps',
'--policyseverity',
type=str,
help='(Optional) - Filter - Policy Severity.')
parser.add_argument(
'-fct',
'--cloudtype',
type=str,
help='(Optional) - Filter - Cloud Type.')
parser.add_argument(
'-fca',
'--cloudaccount',
type=str,
help='(Optional) - Filter - Cloud Account.')
parser.add_argument(
'-fcaid',
'--cloudaccountid',
type=str,
help='(Optional) - Filter - Cloud Account ID.')
parser.add_argument(
'-fcr',
'--cloudregion',
type=str,
help='(Optional) - Filter - Cloud Region.')
parser.add_argument(
'-tr',
'--timerange',
type=int,
default=30,
help='(Optional) - Time Range in days. Defaults to 30.')
parser.add_argument(
'-l',
'--limit',
type=int,
default=500,
help='(Optional) - Return values limit (Default to 500).')
parser.add_argument(
'-fagt',
'--accountgroup',
type=str,
help='(Optional) - Filter - Account Group.')
parser.add_argument(
'-fpid',
'--policyid',
type=str,
help='(Optional) - Filter - Policy ID.')
parser.add_argument(
'-frid',
'--resourceid',
type=str,
help='(Optional) - Filter - Resource ID.')
args = parser.parse_args()
# --End parse command line arguments-- #
print('User login')
pc_settings = pc_lib_general.pc_login_get(args.username, args.password, args.uiurl)
print('Done')
# Verification (override with -y)
if not args.yes:
print()
    print('Ready to execute commands against your Prisma Cloud tenant.')
verification_response = str(input('Would you like to continue (y or yes to continue)?'))
continue_response = {'yes', 'y'}
print()
if verification_response not in continue_response:
pc_lib_general.pc_exit_error(400, 'Verification failed due to user response. Exiting...')
#----------------------------------------------------------------------
print('API - Data Call 1 - Getting authentication token..')
pc_settings = pc_lib_api.pc_jwt_get(pc_settings)
print('Done.')
#current time/date utilized in CSV output filenames
now = datetime.now().strftime("%m_%d_%Y-%I_%M_%p")
print ('Cloud Type Specified in CLI =', args.cloudtype)
print('Building the filters for JSON package.')
alerts_filter = {}
if args.detailed:
alerts_filter['detailed'] = True
else:
alerts_filter['detailed'] = False
alerts_filter['timeRange'] = {}
alerts_filter['timeRange']['type'] = "relative"
alerts_filter['timeRange']['value'] = {}
alerts_filter['timeRange']['value']['unit'] = "day"
alerts_filter['timeRange']['value']['amount'] = args.timerange
alerts_filter['sortBy'] = ["id:asc"]
alerts_filter['offset'] = 0
alerts_filter['limit'] = args.limit
alerts_filter['filters'] = []
if args.alertstatus is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "alert.status"
temp_filter['value'] = args.alertstatus
alerts_filter['filters'].append(temp_filter)
if args.alertid is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "alert.id"
temp_filter['value'] = args.alertid
alerts_filter['filters'].append(temp_filter)
if args.cloudaccount is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "cloud.account"
temp_filter['value'] = args.cloudaccount
alerts_filter['filters'].append(temp_filter)
if args.cloudregion is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "cloud.region"
temp_filter['value'] = args.cloudregion
alerts_filter['filters'].append(temp_filter)
if args.accountgroup is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "account.group"
temp_filter['value'] = args.accountgroup
alerts_filter['filters'].append(temp_filter)
if args.cloudaccountid is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "cloud.accountId"
temp_filter['value'] = args.cloudaccountid
alerts_filter['filters'].append(temp_filter)
if args.cloudtype is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "cloud.type"
temp_filter['value'] = args.cloudtype
alerts_filter['filters'].append(temp_filter)
if args.policytype is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "policy.type"
temp_filter['value'] = args.policytype
alerts_filter['filters'].append(temp_filter)
if args.policycomplianceStandard is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "policy.complianceStandard"
temp_filter['value'] = args.policycomplianceStandard
alerts_filter['filters'].append(temp_filter)
if args.policyseverity is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "policy.severity"
temp_filter['value'] = args.policyseverity
alerts_filter['filters'].append(temp_filter)
if args.policyid is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "policy.id"
temp_filter['value'] = args.policyid
alerts_filter['filters'].append(temp_filter)
if args.resourceid is not None:
temp_filter = {}
temp_filter['operator'] = "="
temp_filter['name'] = "resource.id"
    temp_filter['value'] = args.resourceid
alerts_filter['filters'].append(temp_filter)
print('Done building filters specified in CLI.')
#----------------------------------------------------------------------
print('API - Data Call 2 - Plugging in filters, a granular JSON response is now being prepared by Prisma. A job ID will be provided.')
pc_settings, response_package = pc_lib_api.api_async_alerts_job(pc_settings, data=alerts_filter)
alerts_job_number = response_package
print('Putting JSON response inside a dataframe - job_id')
job_id_json = pandas.json_normalize(alerts_job_number)
'''
nose tests for ipysig.sigma_addon_methods.py
'''
import unittest
import networkx as nx
import pandas as pd
import json
from ..sigma_addon_methods import *
from ..exceptions import IPySigmaGraphDataFrameValueError, \
IPySigmaGraphEdgeIndexError, IPySigmaGraphNodeIndexError, \
IPySigmaNodeTypeError, IPySigmaLabelError
from .mocks import InjectedGraph
import logging
logger = logging.getLogger(__name__)
class TestSigmaAddOns(unittest.TestCase):
@classmethod
def setUpClass(cls):
nx.Graph.sigma_make_graph = sigma_make_graph
nx.Graph.sigma_export_json = sigma_export_json
nx.Graph.sigma_add_degree_centrality = sigma_add_degree_centrality
nx.Graph.sigma_add_betweenness_centrality = sigma_add_betweenness_centrality
nx.Graph.sigma_add_pagerank = sigma_add_pagerank
nx.Graph.sigma_node_add_extra = sigma_node_add_extra
nx.Graph.sigma_choose_edge_color = sigma_choose_edge_color
nx.Graph.sigma_build_pandas_dfs = sigma_build_pandas_dfs
nx.Graph.sigma_edge_weights = sigma_edge_weights
nx.Graph.sigma_node_add_color_node_type = sigma_node_add_color_node_type
nx.Graph.sigma_node_add_label = sigma_node_add_label
nx.Graph.sigma_color_picker = sigma_color_picker
nx.Graph.sigma_assign_node_colors = sigma_assign_node_colors
def setUp(self):
self.graph = nx.complete_graph(10)
self.weighted_graph =nx.Graph()
self.weighted_graph.add_edge('a','b',weight=0.6)
self.weighted_graph.add_edge('a','c',weight=0.2)
self.weighted_graph.add_edge('c','d',weight=0.1)
self.weighted_graph.add_edge('c','e',weight=0.7)
self.graph_attr = nx.Graph()
self.graph_attr.add_edge(0,1)
self.graph_attr.add_edge(1,2)
self.graph_attr.add_edge(3,0)
self.graph_attr.add_node(0,{'node_type':'A', 'label':'node0'})
self.graph_attr.add_node(1,{'node_type':'A', 'label':'node1'})
self.graph_attr.add_node(2,{'node_type': 'B', 'label':'node2'})
self.graph_attr.add_node(3,{'node_type': 'C', 'label':'node2'})
def tearDown(self):
self.graph = None
self.weighted_graph = None
self.graph_attr = None
def test_make_graph(self):
self.graph.sigma_make_graph()
self.assertTrue(hasattr(self.graph, 'sigma_edges'))
self.assertTrue(hasattr(self.graph, 'sigma_nodes'))
def test_degree_centrality_is_added_to_graph_obj(self):
self.graph.sigma_add_degree_centrality()
node_data = self.graph.nodes(data=True)
for node in node_data:
self.assertIn('sigma_deg_central',node[1])
def test_betweenness_centrality_is_added_to_graph_obj(self):
self.graph.sigma_add_betweenness_centrality()
node_data = self.graph.nodes(data=True)
for node in node_data:
self.assertIn('sigma_between_central',node[1])
def test_pagerank_is_added_to_graph_obj(self):
self.graph.sigma_add_pagerank()
node_data = self.graph.nodes(data=True)
for node in node_data:
self.assertIn('sigma_pagerank',node[1])
def test_sigma_build_pandas_dfs_edges_nodes_exist(self):
self.graph.sigma_nodes, self.graph.sigma_edges = self.graph.sigma_build_pandas_dfs()
self.assertTrue(hasattr(self.graph, 'sigma_edges'))
self.assertTrue(hasattr(self.graph, 'sigma_nodes'))
self.weighted_graph.sigma_nodes, self.weighted_graph.sigma_edges = self.weighted_graph.sigma_build_pandas_dfs()
self.assertTrue(hasattr(self.weighted_graph, 'sigma_edges'))
self.assertTrue(hasattr(self.weighted_graph, 'sigma_nodes'))
def test_sigma_build_pandas_dfs_edges_nodes_not_none(self):
self.graph.sigma_nodes, self.graph.sigma_edges = self.graph.sigma_build_pandas_dfs()
self.assertIsNotNone(self.graph.sigma_nodes)
self.assertIsNotNone(self.graph.sigma_edges)
self.weighted_graph.sigma_nodes, self.weighted_graph.sigma_edges = self.weighted_graph.sigma_build_pandas_dfs()
self.assertIsNotNone(self.weighted_graph.sigma_nodes)
self.assertIsNotNone(self.weighted_graph.sigma_edges)
def test_extras_are_added_to_graph_obj(self):
self.graph.sigma_make_graph()
self.assertIn('y',self.graph.sigma_nodes)
self.assertIn('color',self.graph.sigma_nodes)
self.assertIn('size',self.graph.sigma_nodes)
self.assertIn('x', self.graph.sigma_nodes)
def test_edge_weight_color_size_are_set_if_exist(self):
self.weighted_graph.sigma_nodes, self.weighted_graph.sigma_edges = self.weighted_graph.sigma_build_pandas_dfs()
self.weighted_graph.sigma_edge_weights()
self.assertIn('size', self.weighted_graph.sigma_edges)
self.assertIn('color', self.weighted_graph.sigma_edges)
def test_sigma_node_df_has_id(self):
self.graph.sigma_nodes, self.graph.sigma_edges = self.graph.sigma_build_pandas_dfs()
self.assertIn('id',self.graph.sigma_nodes)
def test_sigma_make_graph_produces_neccesary_columns(self):
correct_nodes = ['id','x','y','label','node_type','color','size', 'sigma_deg_central', 'sigma_pagerank', 'sigma_between_central']
correct_edges = ['id','source', 'target', 'color']
correct_edges_weighted = ['id', 'source','target','color','weight','size']
self.graph.sigma_make_graph()
self.weighted_graph.sigma_make_graph()
node_col_list = self.graph.sigma_nodes.columns.tolist()
edge_col_list = self.graph.sigma_edges.columns.tolist()
weight_edge_col_list = self.weighted_graph.sigma_edges.columns.tolist()
for n in node_col_list:
self.assertIn(n, correct_nodes)
for e in edge_col_list:
self.assertIn(e, correct_edges)
for w in weight_edge_col_list:
self.assertIn(w, correct_edges_weighted)
def test_sigma_node_df_contains_additional_columns_if_available(self):
nx.set_node_attributes(self.graph, 'some_attr', 'xxx')
self.graph.sigma_make_graph()
self.assertIn('some_attr',self.graph.sigma_nodes)
def test_sigma_build_pandas_dfs_raise_node_index_error_if_blank(self):
blank_graph = InjectedGraph()
self.assertRaises(IPySigmaGraphNodeIndexError, blank_graph.sigma_make_graph)
def test_sigma_build_pandas_dfs_raise_edge_index_error_if_blank(self):
blank_graph = InjectedGraph()
blank_graph.add_nodes_from(['a','b','c'])
self.assertRaises(IPySigmaGraphEdgeIndexError, blank_graph.sigma_make_graph)
def test_sigma_build_pandas_dfs_raise_error_if_none(self):
def sigma_build_pandas_dfs_none():
'''
this simulates a bogus override of the sigma_build_pandas_df method
'''
return pd.DataFrame(None), pd.DataFrame(None)
self.graph.sigma_build_pandas_dfs = sigma_build_pandas_dfs_none
self.assertRaises(IPySigmaGraphDataFrameValueError, self.graph.sigma_make_graph)
def test_export_json_flag_if_empty(self):
def sigma_build_pandas_dfs_none():
'''
this simulates a bogus override of the sigma_build_pandas_df method
'''
            return pd.DataFrame(None)
import logging
import sys
import pandas as pd
import pytest
import awswrangler as wr
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
@pytest.mark.parametrize("ext", ["xlsx", "xlsm", "xls", "odf"])
@pytest.mark.parametrize("use_threads", [True, False, 2])
def test_excel(path, ext, use_threads):
df = pd.DataFrame({"c0": [1, 2, 3], "c1": ["foo", "boo", "bar"]})
file_path = f"{path}0.{ext}"
pandas_kwargs = {}
if sys.version_info < (3, 7):
pandas_kwargs["engine"] = "xlwt" if ext == "xls" else "openpyxl"
wr.s3.to_excel(df, file_path, use_threads=use_threads, index=False, **pandas_kwargs)
if sys.version_info < (3, 7):
pandas_kwargs["engine"] = "xlrd" if ext == "xls" else "openpyxl"
df2 = wr.s3.read_excel(file_path, use_threads=use_threads, **pandas_kwargs)
assert df.equals(df2)
def test_read_xlsx_versioned(path) -> None:
path_file = f"{path}0.xlsx"
    dfs = [pd.DataFrame({"c0": [0, 1, 2], "c1": [3, 4, 5]})
#!/usr/bin/env python3
"""
"""
from pathlib import Path
import numpy as np
import pandas as pd
def load_market_quality_statistics(filepath: Path,) -> pd.DataFrame:
"""
"""
daily_stats = pd.read_csv(filepath)
daily_stats["date"] = pd.to_datetime(daily_stats["date"], format="%Y-%m-%d")
daily_stats.set_index("date", inplace=True)
daily_stats.sort_index(inplace=True)
# After non-equivalence dummy
daily_stats["after_nonequivalence"] = daily_stats.index >= pd.Timestamp(
"2019-07-01"
)
daily_stats["order_to_trade"] = (
daily_stats["num_orders_total"] / daily_stats["num_transactions"]
)
daily_stats["num_orders_filled_percent"] = (
daily_stats["num_orders_filled"] / daily_stats["num_orders_total"]
)
daily_stats["num_orders_deleted_percent"] = (
daily_stats["num_orders_deleted"] / daily_stats["num_orders_total"]
)
daily_stats["message_counts_add_to_delete"] = (
daily_stats["message_counts_add_order"]
/ daily_stats["message_counts_delete_order"]
)
daily_stats["log_turnover"] = np.log(daily_stats["turnover"])
daily_stats["min_tick_size"] = daily_stats["tick_size_mean"]
daily_stats.drop(columns="tick_size_mean", inplace=True)
daily_stats["min_tick_size_relative"] = daily_stats["min_tick_size"] / daily_stats["price_mean"]
daily_stats["price_reciprocal"] = 1 / daily_stats["price_mean"]
daily_stats["price_log"] = np.log(daily_stats["price_mean"])
daily_stats["AT_proxy"] = (
(daily_stats["turnover"] / 100) / (daily_stats["message_counts_sum"]) * -1
) # Hendershott et al. 2011 JF p. 7
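    # AT_proxy follows Hendershott/Jones/Menkveld (2011): the (negative) traded value in
    # hundreds of currency units per electronic message, so values closer to zero mean
    # more message traffic per unit of volume, i.e. relatively more algorithmic trading.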
print(f"Initial number of stocks {daily_stats['isin'].nunique()}\n")
print("Filter to not include delisted stocks")
last_date_avail = daily_stats.reset_index()[["date", "isin"]].groupby("isin").max()
last_date_avail = last_date_avail.groupby("isin").max()
last_date_avail = last_date_avail[
last_date_avail["date"] != daily_stats.index.max()
]
delisted_isins = last_date_avail.index.to_list()
daily_stats = daily_stats[~daily_stats["isin"].isin(delisted_isins)]
print(f"Num remaining stocks {daily_stats['isin'].nunique()}\n")
print("Exclude all stocks that had an IPO later than January 1st 2019")
daily_stats.set_index("isin", append=True, inplace=True)
first_traded = daily_stats.reset_index().groupby("isin")["date"].min()
first_traded = first_traded.reset_index()
bad_isins = first_traded.loc[
first_traded["date"] >= pd.Timestamp("20190101"), "isin"
]
for isin in bad_isins:
daily_stats.drop(index=isin, level="isin", inplace=True)
# print("also dropping Panalpina, because it was taken-over in August, but continued trading")
# daily_stats.drop(index="CH0002168083", level="isin", inplace=True)
# exclude entries with no messages sent
daily_stats.dropna(subset=["message_counts_sum"], inplace=True)
daily_stats.reset_index("isin", inplace=True)
print(f"Num remaining stocks {daily_stats['isin'].nunique()}\n")
# join VSMI
vsmi = load_vsmi()
daily_stats = daily_stats.join(vsmi["VSMI"], how="left", on="date")
return daily_stats
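# Illustrative usage (the file name below is an assumption, not from the source):
# stats = load_market_quality_statistics(Path("../statistics/daily_stats.csv"))
# stats.groupby("after_nonequivalence")["order_to_trade"].mean()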
def load_vsmi() -> pd.DataFrame:
"""
"""
vsmi = pd.read_csv("../statistics/h_vsmi_30.csv", sep=";")
vsmi.columns = vsmi.columns.str.lower()
vsmi.rename(columns={"indexvalue": "VSMI"}, inplace=True)
vsmi["date"] = pd.to_datetime(vsmi["date"], format="%d.%m.%Y")
vsmi.set_index("date", inplace=True)
return vsmi
def load_copustat():
path = Path("/Users/simon/data/turnover_per_venue/20200118_compustat.csv")
variables = dict(
isin="isin",
datadate="date",
cshoc="shares_outstanding",
cshtrd="trading_volume",
prccd="price_close",
exchg="exchange_code",
# prchd="price_high",
# prcld="price_low",
)
compu = pd.read_csv(path)
compu.rename(columns=variables, inplace=True)
compu.drop(
columns=[col for col in compu.columns if col not in variables.values()],
inplace=True,
)
compu = compu[compu["exchange_code"] == 151]
compu["date"] = pd.to_datetime(compu["date"], format="%Y%m%d")
compu.set_index(["date", "isin"], inplace=True)
return compu
def load_frag_data() -> pd.DataFrame:
mapping: pd.DataFrame = load_mapping()
frag: pd.DataFrame = load_bloomberg_data()
frag = frag.join(mapping).sort_index()
frag = frag.groupby("figi").transform(lambda x: x.ffill())
frag.reset_index("figi", inplace=True)
frag.set_index(["share_class_id_bb_global"], append=True, inplace=True)
# calculate fragmentation index (similar to Gresse, 2017, JFM, p. 6)
frag["market_volume"] = frag.groupby(["date", "share_class_id_bb_global"])[
"volume"
].sum()
frag["market_share"] = frag["volume"] / frag["market_volume"]
frag["market_share_squared"] = frag["market_share"] ** 2
frag["lit_frag"] = frag.groupby(
["date", "share_class_id_bb_global"]
)["market_share_squared"].sum() ** -1
# frag["inverse_market_share"] = 1 / (frag["volume"] / frag["market_volume"])
frag.reset_index("share_class_id_bb_global", inplace=True)
del frag["market_share_squared"]
# keep only six data
frag = frag[frag["mic"].isin(["xvtx", "xswx"])]
return frag
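# Worked example for lit_frag above (illustrative numbers): with 70% of volume on xvtx
# and 30% on xswx, the sum of squared market shares is 0.49 + 0.09 = 0.58, so
# lit_frag = 1 / 0.58 ≈ 1.72, i.e. the inverse Herfindahl index of lit market shares
# (1.0 means fully concentrated; larger values mean more fragmentation).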
def load_bloomberg_data() -> pd.DataFrame:
bloomi = pd.read_csv(
f"~/data/turnover_per_venue/20191231_turnover_bloomberg.csv", sep=";"
)
bloomi = (
bloomi.iloc[1:].rename(columns={"Unnamed: 0": "date"}).set_index("date").stack()
)
bloomi = (
        pd.DataFrame(bloomi)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
"""
BLIS - Balancing Load of Intermittent Solar:
A characteristic-based transient power plant model
Copyright (C) 2020. University of Virginia Licensing & Ventures Group (UVA LVG). All Rights Reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Note: This code uses the results generated from run_monte_carlo1,run_monte_carlo2,run_monte_carlo3 and run_single_case_sCO2
# General imports
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# Set Color Palette
colors = sns.color_palette("colorblind")
# Set resolution for saving figures
DPI = 1200
#%%=============================================================================#
# Figure 5 - Control Scheme
results_filename1 = "Results_SampleDay_Oct30th_sCO2_30.csv"
results_filename2 = "Results_SampleDay_Oct30th_sCO2_60.csv"
savename = "Fig5_ControlScheme.png"
#=============================================================================#
sns.set_style('white')
# Customize Time Shown
#file_t_start = -4.0
t_start = 4.0
t_end = 22.0
# Import results
df = pd.read_csv(results_filename1)
#df2 = pd.read_csv(results_filename2)
#f,a=plt.subplots(nrows=3,ncols=2,sharex=True)
#count = 0
for n in range(3):
x = df.index/60.0
if n ==0:
y1 = df.loc[:,'PowerOutput']
y2 = df.loc[:,'solar']
y3 = df.loc[:,'battDischargeRate']
y = np.vstack((y1,y2,y3))
pal = [colors[2],colors[1],colors[0]]
labels = ['Natural Gas','Solar PV','Battery']
y_label = "Generation (MW)"
# Don't label bottom
labelbottom = 'off'
yticks = [10,30,50]
elif n==1:
y1 = df.loc[:,'demand']
y2 = df.loc[:,'solar']-df.loc[:,'solarUsed']
y3 = df.loc[:,'battChargeRate']
# y4 = df.loc[:,'loadShed']
# y = np.vstack((y1,y2,y3,y4))
y = np.vstack((y1,y2,y3))
pal = [colors[7],colors[1],colors[0],colors[2]]
labels = ['Demand','Solar Curtailment','Battery','NG Load Shed']
y_label = 'Use (MW)'
# Don't label bottom
labelbottom = 'off'
yticks = [10,30,50]
elif n==2:
y = df.loc[:,'battCharge']/60.0
labels = ['Battery']
pal = [colors[0]]
y_label = 'Storage (MWh)'
labelbottom = 'on'
yticks = [0,20,40]
    ax = plt.subplot(3, 1, n + 1)
# ax = a[n][i]
if n==0 or n==1:
ax.stackplot(x, y, labels=labels,colors=pal)
ax.set_ylim(bottom=10.0,top = 50.0)
else:
ax.plot(x,y,label=labels[0])
ax.set_ylim(bottom=0.0,top = 30.0)
# plt.title(v)
ax.set_ylabel(y_label)
plt.xlim(left=t_start,right=t_end)
# Legend
ax.legend(loc='center left',bbox_to_anchor=(1.0, 0.5),fancybox=True)
box = ax.get_position()
ax.set_position([box.x0, box.y0 + box.height, box.width*0.8, box.height])
ax.legend(loc='center left',bbox_to_anchor=(1.0, 0.5),fancybox=True,fontsize=12)
# ax.legend(loc='left',ncol=len(labels),fancybox=True)
plt.tick_params(labelbottom=labelbottom)
if len(yticks)>2:
ax.set_yticks(yticks)
# Set aspect ratio https://jdhao.github.io/2017/06/03/change-aspect-ratio-in-mpl/
ratio = 0.25
xleft, xright = ax.get_xlim() # the abs method is used to make sure that all numbers are positive
ybottom, ytop = ax.get_ylim() # because x and y axis of an axes maybe inversed.
ax.set_aspect(abs((xright-xleft)/(ybottom-ytop))*ratio)
# Caption labels
caption_labels = ['A','B','C','D','E','F']
plt.text(0.05, 0.85, caption_labels[n], horizontalalignment='center',verticalalignment='center', transform=ax.transAxes,fontsize='medium',fontweight='bold')
# count = count + 1
# Adjust layout
plt.tight_layout()
# Save Figure
plt.savefig(savename,dpi=DPI,bbox_inches="tight")
plt.close()
#%%=============================================================================#
# Figure 6 - LCOE
results_filename = "results_monte_carlo.csv"
savename = "Fig6_LCOE.png"
#=============================================================================#
sns.reset_orig
sns.set_style('white')
# Import results
df = pd.read_csv(results_filename)
# Arithmetic tests for DataFrame/Series/Index/Array classes that should
# behave identically.
from datetime import datetime, timedelta
import numpy as np
import pytest
from pandas.errors import (
NullFrequencyError, OutOfBoundsDatetime, PerformanceWarning)
import pandas as pd
from pandas import (
DataFrame, DatetimeIndex, NaT, Series, Timedelta, TimedeltaIndex,
Timestamp, timedelta_range)
import pandas.util.testing as tm
def get_upcast_box(box, vector):
"""
Given two box-types, find the one that takes priority
"""
if box is DataFrame or isinstance(vector, DataFrame):
return DataFrame
if box is Series or isinstance(vector, Series):
return Series
if box is pd.Index or isinstance(vector, pd.Index):
return pd.Index
return box
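# Example of the priority rule above (illustrative): get_upcast_box(pd.Index, pd.Series([1]))
# returns Series, since DataFrame takes priority over Series, which takes priority over Index.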
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Comparisons
class TestTimedelta64ArrayLikeComparisons:
# Comparison tests for timedelta64[ns] vectors fully parametrized over
# DataFrame/Series/TimedeltaIndex/TimedeltaArray. Ideally all comparison
# tests will eventually end up here.
def test_compare_timedelta64_zerodim(self, box_with_array):
# GH#26689 should unbox when comparing with zerodim array
box = box_with_array
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = pd.timedelta_range('2H', periods=4)
other = np.array(tdi.to_numpy()[0])
tdi = tm.box_expected(tdi, box)
res = tdi <= other
expected = np.array([True, False, False, False])
expected = tm.box_expected(expected, xbox)
tm.assert_equal(res, expected)
with pytest.raises(TypeError):
# zero-dim of wrong dtype should still raise
tdi >= np.array(4)
class TestTimedelta64ArrayComparisons:
# TODO: All of these need to be parametrized over box
def test_compare_timedelta_series(self):
# regression test for GH#5963
s = pd.Series([timedelta(days=1), timedelta(days=2)])
actual = s > timedelta(days=1)
expected = pd.Series([False, True])
tm.assert_series_equal(actual, expected)
def test_tdi_cmp_str_invalid(self, box_with_array):
# GH#13624
xbox = box_with_array if box_with_array is not pd.Index else np.ndarray
tdi = TimedeltaIndex(['1 day', '2 days'])
tdarr = tm.box_expected(tdi, box_with_array)
for left, right in [(tdarr, 'a'), ('a', tdarr)]:
with pytest.raises(TypeError):
left > right
with pytest.raises(TypeError):
left >= right
with pytest.raises(TypeError):
left < right
with pytest.raises(TypeError):
left <= right
result = left == right
expected = np.array([False, False], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
result = left != right
expected = np.array([True, True], dtype=bool)
expected = tm.box_expected(expected, xbox)
tm.assert_equal(result, expected)
@pytest.mark.parametrize('dtype', [None, object])
def test_comp_nat(self, dtype):
left = pd.TimedeltaIndex([pd.Timedelta('1 days'), pd.NaT,
pd.Timedelta('3 days')])
right = pd.TimedeltaIndex([pd.NaT, pd.NaT, pd.Timedelta('3 days')])
lhs, rhs = left, right
if dtype is object:
lhs, rhs = left.astype(object), right.astype(object)
result = rhs == lhs
expected = np.array([False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = rhs != lhs
expected = np.array([True, True, False])
tm.assert_numpy_array_equal(result, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs == pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT == rhs, expected)
expected = np.array([True, True, True])
tm.assert_numpy_array_equal(lhs != pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT != lhs, expected)
expected = np.array([False, False, False])
tm.assert_numpy_array_equal(lhs < pd.NaT, expected)
tm.assert_numpy_array_equal(pd.NaT > lhs, expected)
def test_comparisons_nat(self):
tdidx1 = pd.TimedeltaIndex(['1 day', pd.NaT, '1 day 00:00:01', pd.NaT,
'1 day 00:00:01', '5 day 00:00:03'])
tdidx2 = pd.TimedeltaIndex(['2 day', '2 day', pd.NaT, pd.NaT,
'1 day 00:00:02', '5 days 00:00:03'])
tdarr = np.array([np.timedelta64(2, 'D'),
np.timedelta64(2, 'D'), np.timedelta64('nat'),
np.timedelta64('nat'),
np.timedelta64(1, 'D') + np.timedelta64(2, 's'),
np.timedelta64(5, 'D') + np.timedelta64(3, 's')])
cases = [(tdidx1, tdidx2), (tdidx1, tdarr)]
# Check pd.NaT is handles as the same as np.nan
for idx1, idx2 in cases:
result = idx1 < idx2
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx2 > idx1
expected = np.array([True, False, False, False, True, False])
tm.assert_numpy_array_equal(result, expected)
result = idx1 <= idx2
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx2 >= idx1
expected = np.array([True, False, False, False, True, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 == idx2
expected = np.array([False, False, False, False, False, True])
tm.assert_numpy_array_equal(result, expected)
result = idx1 != idx2
expected = np.array([True, True, True, True, True, False])
tm.assert_numpy_array_equal(result, expected)
# TODO: better name
def test_comparisons_coverage(self):
rng = timedelta_range('1 days', periods=10)
result = rng < rng[3]
expected = np.array([True, True, True] + [False] * 7)
tm.assert_numpy_array_equal(result, expected)
# raise TypeError for now
with pytest.raises(TypeError):
rng < rng[3].value
result = rng == list(rng)
exp = rng == rng
tm.assert_numpy_array_equal(result, exp)
# ------------------------------------------------------------------
# Timedelta64[ns] dtype Arithmetic Operations
class TestTimedelta64ArithmeticUnsorted:
# Tests moved from type-specific test files but not
# yet sorted/parametrized/de-duplicated
def test_ufunc_coercions(self):
# normal ops are also tested in tseries/test_timedeltas.py
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [idx * 2, np.multiply(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['4H', '8H', '12H', '16H', '20H'],
freq='4H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '4H'
for result in [idx / 2, np.divide(idx, 2)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['1H', '2H', '3H', '4H', '5H'],
freq='H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'H'
idx = TimedeltaIndex(['2H', '4H', '6H', '8H', '10H'],
freq='2H', name='x')
for result in [-idx, np.negative(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['-2H', '-4H', '-6H', '-8H', '-10H'],
freq='-2H', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '-2H'
idx = TimedeltaIndex(['-2H', '-1H', '0H', '1H', '2H'],
freq='H', name='x')
for result in [abs(idx), np.absolute(idx)]:
assert isinstance(result, TimedeltaIndex)
exp = TimedeltaIndex(['2H', '1H', '0H', '1H', '2H'],
freq=None, name='x')
tm.assert_index_equal(result, exp)
assert result.freq is None
def test_subtraction_ops(self):
# with datetimes/timedelta and tdi/dti
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
td = Timedelta('1 days')
dt = Timestamp('20130101')
msg = "cannot subtract a datelike from a TimedeltaArray"
with pytest.raises(TypeError, match=msg):
tdi - dt
with pytest.raises(TypeError, match=msg):
tdi - dti
msg = (r"descriptor '__sub__' requires a 'datetime\.datetime' object"
" but received a 'Timedelta'")
with pytest.raises(TypeError, match=msg):
td - dt
msg = "bad operand type for unary -: 'DatetimeArray'"
with pytest.raises(TypeError, match=msg):
td - dti
result = dt - dti
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = dti - dt
expected = TimedeltaIndex(['0 days', '1 days', '2 days'], name='bar')
tm.assert_index_equal(result, expected)
result = tdi - td
expected = TimedeltaIndex(['0 days', pd.NaT, '1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = td - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '-1 days'], name='foo')
tm.assert_index_equal(result, expected, check_names=False)
result = dti - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], name='bar')
tm.assert_index_equal(result, expected, check_names=False)
result = dt - tdi
expected = DatetimeIndex(['20121231', pd.NaT, '20121230'], name='foo')
tm.assert_index_equal(result, expected)
def test_subtraction_ops_with_tz(self):
# check that dt/dti subtraction ops with tz are validated
dti = pd.date_range('20130101', periods=3)
ts = Timestamp('20130101')
dt = ts.to_pydatetime()
dti_tz = pd.date_range('20130101', periods=3).tz_localize('US/Eastern')
ts_tz = Timestamp('20130101').tz_localize('US/Eastern')
ts_tz2 = Timestamp('20130101').tz_localize('CET')
dt_tz = ts_tz.to_pydatetime()
td = Timedelta('1 days')
def _check(result, expected):
assert result == expected
assert isinstance(result, Timedelta)
# scalars
result = ts - ts
expected = Timedelta('0 days')
_check(result, expected)
result = dt_tz - ts_tz
expected = Timedelta('0 days')
_check(result, expected)
result = ts_tz - dt_tz
expected = Timedelta('0 days')
_check(result, expected)
# tz mismatches
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt_tz - dt
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
dt_tz - ts_tz2
msg = "can't subtract offset-naive and offset-aware datetimes"
with pytest.raises(TypeError, match=msg):
dt - dt_tz
msg = ("Timestamp subtraction must have the same timezones or no"
" timezones")
with pytest.raises(TypeError, match=msg):
ts - dt_tz
with pytest.raises(TypeError, match=msg):
ts_tz2 - ts
with pytest.raises(TypeError, match=msg):
ts_tz2 - dt
with pytest.raises(TypeError, match=msg):
ts_tz - ts_tz2
# with dti
with pytest.raises(TypeError, match=msg):
dti - ts_tz
with pytest.raises(TypeError, match=msg):
dti_tz - ts
with pytest.raises(TypeError, match=msg):
dti_tz - ts_tz2
result = dti_tz - dt_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = dt_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = dti_tz - ts_tz
expected = TimedeltaIndex(['0 days', '1 days', '2 days'])
tm.assert_index_equal(result, expected)
result = ts_tz - dti_tz
expected = TimedeltaIndex(['0 days', '-1 days', '-2 days'])
tm.assert_index_equal(result, expected)
result = td - td
expected = Timedelta('0 days')
_check(result, expected)
result = dti_tz - td
expected = DatetimeIndex(
['20121231', '20130101', '20130102'], tz='US/Eastern')
tm.assert_index_equal(result, expected)
def test_dti_tdi_numeric_ops(self):
# These are normally union/diff set-like ops
tdi = TimedeltaIndex(['1 days', pd.NaT, '2 days'], name='foo')
dti = pd.date_range('20130101', periods=3, name='bar')
# TODO(wesm): unused?
# td = Timedelta('1 days')
# dt = Timestamp('20130101')
result = tdi - tdi
expected = TimedeltaIndex(['0 days', pd.NaT, '0 days'], name='foo')
        tm.assert_index_equal(result, expected)
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s, dtype=np.int64))
s = [1., 3., 4., 5.]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
# bool is regarded as numeric
s = [True, False, True, True]
res = to_numeric(s)
tm.assert_numpy_array_equal(res, np.array(s))
def test_numeric(self):
s = pd.Series([1, -3.14, 7], dtype='O')
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series([1, -3.14, 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
# GH 14827
df = pd.DataFrame(dict(
a=[1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), '0.1'],
b=[1.0, 2.0, 3.0, 4.0],
))
expected = pd.DataFrame(dict(
a=[1.2, 3.14, np.inf, 0.1],
b=[1.0, 2.0, 3.0, 4.0],
))
# Test to_numeric over one column
df_copy = df.copy()
df_copy['a'] = df_copy['a'].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
# Test to_numeric over multiple columns
df_copy = df.copy()
df_copy[['a', 'b']] = df_copy[['a', 'b']].apply(to_numeric)
tm.assert_frame_equal(df_copy, expected)
def test_numeric_lists_and_arrays(self):
# Test to_numeric with embedded lists and arrays
df = pd.DataFrame(dict(
a=[[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 1.6, 0.1],
))
tm.assert_frame_equal(df, expected)
df = pd.DataFrame(dict(
a=[np.array([decimal.Decimal(3.14), 1.0]), 0.1]
))
df['a'] = df['a'].apply(to_numeric)
expected = pd.DataFrame(dict(
a=[[3.14, 1.0], 0.1],
))
tm.assert_frame_equal(df, expected)
def test_all_nan(self):
s = pd.Series(['a', 'b', 'c'])
res = to_numeric(s, errors='coerce')
expected = pd.Series([np.nan, np.nan, np.nan])
tm.assert_series_equal(res, expected)
@pytest.mark.parametrize("errors", [None, "ignore", "raise", "coerce"])
def test_type_check(self, errors):
# see gh-11776
df = pd.DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})
kwargs = dict(errors=errors) if errors is not None else dict()
error_ctx = pytest.raises(TypeError, match="1-d array")
with error_ctx:
to_numeric(df, **kwargs)
def test_scalar(self):
assert pd.to_numeric(1) == 1
assert pd.to_numeric(1.1) == 1.1
assert pd.to_numeric('1') == 1
assert pd.to_numeric('1.1') == 1.1
with pytest.raises(ValueError):
to_numeric('XX', errors='raise')
assert to_numeric('XX', errors='ignore') == 'XX'
assert np.isnan(to_numeric('XX', errors='coerce'))
def test_numeric_dtypes(self):
idx = pd.Index([1, 2, 3], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
idx = pd.Index([1., np.nan, 3., np.nan], name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, idx)
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.values)
def test_str(self):
idx = pd.Index(['1', '2', '3'], name='xxx')
exp = np.array([1, 2, 3], dtype='int64')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
idx = pd.Index(['1.5', '2.7', '3.4'], name='xxx')
exp = np.array([1.5, 2.7, 3.4])
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(exp, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(exp, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, exp)
def test_datetime_like(self, tz_naive_fixture):
idx = pd.date_range("20130101", periods=3,
tz=tz_naive_fixture, name="xxx")
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name="xxx"))
res = pd.to_numeric(pd.Series(idx, name="xxx"))
tm.assert_series_equal(res, pd.Series(idx.asi8, name="xxx"))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_timedelta(self):
idx = pd.timedelta_range('1 days', periods=3, freq='D', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
res = pd.to_numeric(pd.Series(idx, name='xxx'))
tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
res = pd.to_numeric(idx.values)
tm.assert_numpy_array_equal(res, idx.asi8)
def test_period(self):
idx = pd.period_range('2011-01', periods=3, freq='M', name='xxx')
res = pd.to_numeric(idx)
tm.assert_index_equal(res, pd.Index(idx.asi8, name='xxx'))
# TODO: enable when we can support native PeriodDtype
# res = pd.to_numeric(pd.Series(idx, name='xxx'))
# tm.assert_series_equal(res, pd.Series(idx.asi8, name='xxx'))
def test_non_hashable(self):
# Test for Bug #13324
s = pd.Series([[10.0, 2], 1.0, 'apple'])
res = pd.to_numeric(s, errors='coerce')
tm.assert_series_equal(res, pd.Series([np.nan, 1.0, np.nan]))
res = pd.to_numeric(s, errors='ignore')
tm.assert_series_equal(res, pd.Series([[10.0, 2], 1.0, 'apple']))
with pytest.raises(TypeError, match="Invalid object type"):
pd.to_numeric(s)
@pytest.mark.parametrize("data", [
["1", 2, 3],
[1, 2, 3],
np.array(["1970-01-02", "1970-01-03",
"1970-01-04"], dtype="datetime64[D]")
])
def test_downcast_basic(self, data):
# see gh-13352
invalid_downcast = "unsigned-integer"
msg = "invalid downcasting method provided"
with pytest.raises(ValueError, match=msg):
pd.to_numeric(data, downcast=invalid_downcast)
expected = np.array([1, 2, 3], dtype=np.int64)
# Basic function tests.
        res = pd.to_numeric(data)
# store_data_file.py - Process JSON files from AWS S3 to PostgreSQL Database
# forked from Joinville Smart Mobility Project and Louisville WazeCCPProcessor
# modified by <EMAIL> for City of Los Angeles CCP
import os
import sys
import json
import numpy as np
import pandas as pd
from pandas.io.json import json_normalize
import hashlib
from sqlalchemy import create_engine, exc, MetaData, select
from sqlalchemy.engine.url import URL
from sqlalchemy.sql import or_, and_
from sqlalchemy.types import JSON as typeJSON
from geoalchemy2 import Geometry, Geography
from pathlib import Path
from datetime import datetime, timedelta
import time
#import boto3
# airflow config
import logging
from airflow import DAG
#from airflow.operators.bash_operator import BashOperator
from airflow.operators.python_operator import PythonOperator
from airflow.sensors.s3_key_sensor import S3KeySensor
from airflow.operators.email_operator import EmailOperator
from airflow.hooks.S3_hook import S3Hook
from airflow.hooks.postgres_hook import PostgresHook
from airflow.models import Variable #db schema and s3 bucket names stored in Variables
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'start_date': datetime(2018, 12, 3),
'email': ['<EMAIL>','<EMAIL>'],
'email_on_failure': True,
'email_on_retry': False,
'retries': 1
#'retry_delay': timedelta(minutes=5),
# 'queue': 'bash_queue',
# 'pool': 'backfill',
# 'priority_weight': 10,
# 'end_date': datetime(2016, 1, 1),
}
dag = DAG('waze-s3-to-sql',
description='DAG for moving Waze data from S3 to RDS',
catchup=False, default_args=default_args,schedule_interval="*/1 * * * *",concurrency=3,max_active_runs=3) #"@hourly") # *****=run every minute
def s3keyCheck():
logging.info("s3keyCheck called")
print ('s3keyCheck')
hook = S3Hook(aws_conn_id="s3_conn")
if (hook.check_for_wildcard_key(wildcard_key="*",bucket_name=bucket_source_name)):
logging.info("wildcard found key")
return True
else:
logging.info("no key found")
return False
def logTest():
logging.info('logTest called')
def connect_database(connection_id):
logging.info ("connect_database called")
dbhook = PostgresHook(postgres_conn_id=connection_id)
logging.info ("got hook")
pgconn = dbhook.get_connection(conn_id=connection_id)
logging.info ("got connection")
# DATABASE = database_dict
# db_url = URL(**DATABASE)
db_url = 'postgresql://{c.login}:{c.password}@{c.host}:{c.port}/{c.schema}'.format(c=pgconn)
#logging.info ("url: "+db_url)
engine = create_engine(db_url)
meta = MetaData()
meta.bind = engine
return meta
def tab_raw_data(s3_key_obj):
#read data file
#doc = open(datafile_s3_key) # old local file method
# get file from S3
#obj = client.get_object(Bucket=bucket_source_name,Key=datafile_s3_key)
records = json.loads(s3_key_obj.get()['Body'].read()) #doc)
raw_data = json_normalize(records)
#clean date fields
raw_data['startTime'] = pd.to_datetime(raw_data['startTime'].str[:-4])
raw_data['endTime'] = pd.to_datetime(raw_data['endTime'].str[:-4])
raw_data['startTime'] = raw_data['startTime'].astype(pd.Timestamp)
raw_data['endTime'] = raw_data['endTime'].astype(pd.Timestamp)
#get creation time
raw_data['date_created'] = pd.to_datetime('today')
#during creation time, date_updated equals date_created
raw_data['date_updated'] = raw_data['date_created']
#get_file_name
raw_data['file_name'] = s3_key_obj.key #datafile_s3_key
#get AJI hash
aji_list = [json.dumps(raw_data['alerts'].iloc[0],sort_keys=True) if 'alerts' in raw_data else '',
json.dumps(raw_data['jams'].iloc[0],sort_keys=True) if 'jams' in raw_data else '',
json.dumps(raw_data['irregularities'].iloc[0],sort_keys=True) if 'irregularities' in raw_data else '',
]
aji_string = "".join(aji_list)
raw_data['json_hash'] = hashlib.md5(aji_string.encode()).hexdigest()
return raw_data
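# Note (assumed intent): json_hash above is an MD5 over the concatenated alerts/jams/
# irregularities JSON, so two data files with identical payloads produce the same hash
# and can be skipped as duplicates when loading into PostgreSQL.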
def tab_jams(raw_data):
if 'jams' not in raw_data:
#print("No jams in this data file.")
return
df_jams = json_normalize(raw_data['jams'].iloc[0])
col_dict = {
'blockingAlertUuid': "blocking_alert_id",
'startNode': "start_node",
'endNode': "end_node",
'pubMillis': "pub_millis",
'roadType': "road_type",
'speedKMH': "speed_kmh",
'turnType': "turn_type",
}
other_cols = ['city', 'country','delay', 'length',
'uuid', 'street', 'level', 'line',
'type','speed','id'] #segments
df_jams.rename(columns=col_dict, inplace=True)
col_list = list(col_dict.values())
col_list = col_list + other_cols
all_columns = pd.DataFrame(np.nan, index=[0], columns=col_list)
df_jams,_ = df_jams.align(all_columns, axis=1)
df_jams = df_jams[col_list]
df_jams["pub_utc_date"] = pd.to_datetime(df_jams["pub_millis"], unit='ms')
#id_string = str(df_jams["pub_millis"])+str(raw_data["json_hash"])
#print(id_string)
#df_jams["id"] = id_string;
return df_jams
def tab_irregularities(raw_data):
if 'irregularities' not in raw_data:
#print("No irregularities in this data file.")
return
df_irregs = json_normalize(raw_data['irregularities'].iloc[0])
col_dict = {
'detectionDateMillis': "detection_date_millis",
'detectionDate': "detection_date",
'updateDateMillis': "update_date_millis",
'updateDate': "update_date",
'regularSpeed': "regular_speed",
'delaySeconds': "delay_seconds",
'jamLevel': "jam_level",
'driversCount': "drivers_count",
'alertsCount': "alerts_count",
'nThumbsUp': "n_thumbs_up",
'nComments': "n_comments",
'nImages': "n_images",
'endNode': "end_node",
'startNode': "start_node",
'highway': "is_highway"
}
other_cols = ['street', 'city', 'country', 'speed',
'seconds', 'length', 'trend', 'type', 'severity', 'line','id']
df_irregs.rename(columns=col_dict, inplace=True)
col_list = list(col_dict.values())
col_list = col_list + other_cols
    all_columns = pd.DataFrame(np.nan, index=[0], columns=col_list)
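    # By analogy with tab_jams above, the remainder of this function would presumably
    # continue along these lines (assumed, not from the source):
    #     df_irregs, _ = df_irregs.align(all_columns, axis=1)
    #     df_irregs = df_irregs[col_list]
    #     df_irregs["detection_utc_date"] = pd.to_datetime(df_irregs["detection_date_millis"], unit='ms')
    #     return df_irregs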
"""Main module
# Resources
- Reference google sheets:
- Source data: https://docs.google.com/spreadsheets/d/1jzGrVELQz5L4B_-DqPflPIcpBaTfJOUTrVJT5nS_j18/edit#gid=1335629675
- Source data (old): https://docs.google.com/spreadsheets/d/17hHiqc6GKWv9trcW-lRnv-MhZL8Swrx2/edit#gid=1335629675
- Output example: https://docs.google.com/spreadsheets/d/1uroJbhMmOTJqRkTddlSNYleSKxw4i2216syGUSK7ZuU/edit?userstoinvite=<EMAIL>&actionButton=1#gid=435465078
"""
import json
import os
import pickle
import sys
from copy import copy
from datetime import datetime, timezone
from pathlib import Path
from typing import Dict, List, OrderedDict
from uuid import uuid4
import pandas as pd
try:
import ArgumentParser
except ModuleNotFoundError:
from argparse import ArgumentParser
from vsac_wrangler.config import CACHE_DIR, DATA_DIR, PROJECT_ROOT
from vsac_wrangler.definitions.constants import FHIR_JSON_TEMPLATE
from vsac_wrangler.google_sheets import get_sheets_data
from vsac_wrangler.vsac_api import get_ticket_granting_ticket, get_value_sets
from vsac_wrangler.interfaces._cli import get_parser
# USER1: This is an actual ID to a valid user in palantir, who works on our BIDS team.
PROJECT_NAME = 'RP-4A9E27'
PALANTIR_ENCLAVE_USER_ID_1 = 'a39723f3-dc9c-48ce-90ff-06891c29114f'
PARSE_ARGS = get_parser().parse_args() # for convenient access later
OUTPUT_NAME ='palantir-three-file' # currently this is the only value used
SOURCE_NAME ='vsac'
def get_runtime_provenance() -> str:
"""Get provenance info related to this runtime operation"""
return f'oids from {PARSE_ARGS.input_path} => VSAC trad API => 3-file dir {get_out_dir()}'
def format_label(label, verbose_prefix=False) -> str:
"""Adds prefix and trims whitespace"""
label = label.strip()
prefix = 'VSAC' if not verbose_prefix else get_runtime_provenance()
return f'[{prefix}] {label}'
def get_out_dir(output_name=OUTPUT_NAME, source_name=SOURCE_NAME) -> str:
date_str = datetime.now().strftime('%Y.%m.%d')
out_dir = os.path.join(DATA_DIR, output_name, source_name, date_str, 'output')
return out_dir
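# For example, with the defaults above get_out_dir() resolves to something like
# "<DATA_DIR>/palantir-three-file/vsac/2022.01.12/output" (the date part reflects the run date).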
# to-do: Shared lib for this stuff?
# noinspection DuplicatedCode
def _save_csv(df: pd.DataFrame, filename, output_name=OUTPUT_NAME, source_name=SOURCE_NAME, field_delimiter=','):
"""Side effects: Save CSV"""
out_dir = get_out_dir(output_name=output_name, source_name=source_name)
os.makedirs(out_dir, exist_ok=True)
output_format = 'csv' if field_delimiter == ',' else 'tsv' if field_delimiter == '\t' else 'txt'
outpath = os.path.join(out_dir, f'{filename}.{output_format}')
df.to_csv(outpath, sep=field_delimiter, index=False)
def _datetime_palantir_format() -> str:
"""Returns datetime str in format used by palantir data enclave
e.g. 2021-03-03T13:24:48.000Z (milliseconds allowed, but not common in observed table)"""
return datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%S.%fZ")[:-4] + 'Z'
def save_json(value_sets, output_structure, json_indent=4) -> List[Dict]:
"""Save JSON"""
# Populate JSON objs
d_list: List[Dict] = []
for value_set in value_sets:
value_set2 = {}
if output_structure == 'fhir':
value_set2 = vsac_to_fhir(value_set)
elif output_structure == 'vsac':
value_set2 = vsac_to_vsac(value_set)
elif output_structure == 'atlas':
raise NotImplementedError('For "atlas" output-structure, output-format "json" not yet implemented.')
d_list.append(value_set2)
# Save file
for d in d_list:
if 'name' in d:
valueset_name = d['name']
else:
valueset_name = d['Concept Set Name']
valueset_name = valueset_name.replace('/', '|')
filename = valueset_name + '.json'
filepath = os.path.join(DATA_DIR, filename)
with open(filepath, 'w') as fp:
if json_indent:
json.dump(d, fp, indent=json_indent)
else:
json.dump(d, fp)
return d_list
# TODO: repurpose this to use VSAC format
# noinspection DuplicatedCode
def vsac_to_fhir(value_set: Dict) -> Dict:
"""Convert VSAC JSON dict to FHIR JSON dict"""
# TODO: cop/paste FHIR_JSON_TEMPLATE literally here instead and use like other func
d: Dict = copy(FHIR_JSON_TEMPLATE)
d['id'] = int(value_set['valueSet.id'][0])
d['text']['div'] = d['text']['div'].format(value_set['valueSet.description'][0])
d['url'] = d['url'].format(str(value_set['valueSet.id'][0]))
d['name'] = value_set['valueSet.name'][0]
d['title'] = value_set['valueSet.name'][0]
d['status'] = value_set['valueSet.status'][0]
d['description'] = value_set['valueSet.description'][0]
d['compose']['include'][0]['system'] = value_set['valueSet.codeSystem'][0]
d['compose']['include'][0]['version'] = value_set['valueSet.codeSystemVersion'][0]
concepts = []
d['compose']['include'][0]['concept'] = concepts
return d
# TODO:
def vsac_to_vsac(v: Dict, depth=2) -> Dict:
"""Convert VSAC JSON dict to OMOP JSON dict
This is the format @DaveraGabriel specified by looking at the VSAC web interface."""
# Attempt at regexp
# Clinical Focus: Asthma conditions which suggest applicability of NHLBI NAEPP EPR3 Guidelines for the Diagnosis and
# Management of Asthma (2007) and the 2020 Focused Updates to the Asthma Management Guidelines),(Data Element Scope:
# FHIR Condition.code),(Inclusion Criteria: SNOMEDCT concepts in "Asthma SCT" and ICD10CM concepts in "Asthma
# ICD10CM" valuesets.),(Exclusion Criteria: none)
# import re
# regexer = re.compile('\((.+): (.+)\)') # fail
# regexer = re.compile('\((.+): (.+)\)[,$]')
# found = regexer.match(value_sets['ns0:Purpose'])
# x1 = found.groups()[0]
purposes = v['ns0:Purpose'].split('),')
d = {
"Concept Set Name": v['@displayName'],
"Created At": 'vsacToOmopConversion:{}; vsacRevision:{}'.format(
datetime.now().strftime('%Y/%m/%d'),
v['ns0:RevisionDate']),
"Created By": v['ns0:Source'],
# "Created By": "https://github.com/HOT-Ecosystem/ValueSet-Converters",
"Intention": {
"Clinical Focus": purposes[0].split('(Clinical Focus: ')[1],
"Inclusion Criteria": purposes[2].split('(Inclusion Criteria: ')[1],
"Data Element Scope": purposes[1].split('(Data Element Scope: ')[1],
"Exclusion Criteria": purposes[3].split('(Exclusion Criteria: ')[1],
},
"Limitations": {
"Exclusion Criteria": "",
"VSAC Note": None, # VSAC Note: (exclude if null)
},
"Provenance": {
"Steward": "",
"OID": "",
"Code System(s)": [],
"Definition Type": "",
"Definition Version": "",
}
}
# TODO: use depth to make this either nested JSON, or, if depth=1, concatenate
# ... all intention sub-fields into a single string, etc.
if depth == 1:
d['Intention'] = ''
elif depth < 1 or depth > 2:
raise RuntimeError(f'vsac_to_vsac: depth parameter valid range: 1-2, but depth of {depth} was requested.')
return d
def get_vsac_csv(
value_sets: List[OrderedDict], google_sheet_name=None, field_delimiter=',', code_delimiter='|', filename='vsac_csv'
) -> pd.DataFrame:
"""Convert VSAC hiearchical XML in a VSAC-oriented tabular file"""
rows = []
for value_set in value_sets:
code_system_codes = {}
name = value_set['@displayName']
purposes = value_set['ns0:Purpose'].split('),')
purposes2 = []
for p in purposes:
i1 = 1 if p.startswith('(') else 0
i2 = -1 if p[len(p) - 1] == ')' else len(p)
purposes2.append(p[i1:i2])
concepts = value_set['ns0:ConceptList']['ns0:Concept']
concepts = concepts if type(concepts) == list else [concepts]
for concept in concepts:
code = concept['@code']
code_system = concept['@codeSystemName']
if code_system not in code_system_codes:
code_system_codes[code_system] = []
code_system_codes[code_system].append(code)
for code_system, codes in code_system_codes.items():
row = {
'name': name,
'nameVSAC': '[VSAC] ' + name,
'oid': value_set['@ID'],
'codeSystem': code_system,
'limitations': purposes2[3],
'intention': '; '.join(purposes2[0:3]),
'provenance': '; '.join([
'Steward: ' + value_set['ns0:Source'],
'OID: ' + value_set['@ID'],
'Code System(s): ' + ','.join(list(code_system_codes.keys())),
'Definition Type: ' + value_set['ns0:Type'],
'Definition Version: ' + value_set['@version'],
'Accessed: ' + str(datetime.now())[0:-7]
]),
}
if len(codes) < 2000:
row['codes'] = code_delimiter.join(codes)
else:
row['codes'] = code_delimiter.join(codes[0:1999])
if len(codes) < 4000:
row['codes2'] = code_delimiter.join(codes[2000:])
else:
row['codes2'] = code_delimiter.join(codes[2000:3999])
row['codes3'] = code_delimiter.join(codes[4000:])
row2 = {}
for k, v in row.items():
row2[k] = v.replace('\n', ' - ') if type(v) == str else v
row = row2
rows.append(row)
# Create/Return DF & Save CSV
df = pd.DataFrame(rows)
_save_csv(df, filename=filename, source_name=google_sheet_name, field_delimiter=field_delimiter)
return df
def get_ids_for_palantir3file(value_sets: pd.DataFrame) -> Dict[str, int]:
oid_enclave_code_set_id_map_csv_path = os.path.join(PROJECT_ROOT, 'data', 'cset.csv')
oid_enclave_code_set_id_df = pd.read_csv(oid_enclave_code_set_id_map_csv_path)
missing_oids = set(value_sets['@ID']) - set(oid_enclave_code_set_id_df['oid'])
if len(missing_oids) > 0:
google_sheet_url = PARSE_ARGS.google_sheet_url
new_ids = [
id for id in oid_enclave_code_set_id_df.internal_id.max() + 1 + range(0, len(missing_oids))]
missing_recs = pd.DataFrame(data={
'source_id_field': ['oid' for i in range(0, len(missing_oids))],
'oid': [oid for oid in missing_oids],
'ccsr_code': [None for i in range(0, len(missing_oids))],
'internal_id': new_ids,
'internal_source': [google_sheet_url for i in range(0, len(missing_oids))],
'cset_source': ['VSAC' for i in range(0, len(missing_oids))],
'grouped_by_bids': [None for i in range(0, len(missing_oids))],
'concept_id': [None for i in range(0, len(missing_oids))],
})
oid_enclave_code_set_id_df = pd.concat([oid_enclave_code_set_id_df, missing_recs])
oid_enclave_code_set_id_df.to_csv(oid_enclave_code_set_id_map_csv_path, index=False)
oid__codeset_id_map = dict(zip(
oid_enclave_code_set_id_df['oid'],
oid_enclave_code_set_id_df['internal_id']))
return oid__codeset_id_map
def get_palantir_csv(
value_sets: pd.DataFrame, source_name='vsac', field_delimiter=',',
filename1='concept_set_version_item_rv_edited', filename2='code_sets', filename3='concept_set_container_edited'
) -> Dict[str, pd.DataFrame]:
"""Convert VSAC hiearchical XML to CSV compliant w/ Palantir's OMOP-inspired concept set editor data model"""
# I. Create IDs that will be shared between files
oid__codeset_id_map = get_ids_for_palantir3file(value_sets)
# II. Create & save exports
_all = {}
# 1. Palantir enclave table: concept_set_version_item_rv_edited
rows1 = []
for i, value_set in value_sets.iterrows():
codeset_id = oid__codeset_id_map[value_set['@ID']]
for concept in value_set['concepts']:
code = concept['@code']
code_system = concept['@codeSystemName']
            # The 3 fields isExcluded, includeDescendants, and includeMapped are from OMOP but also exist in VSAC.
            # A definition that carries these 3 options is intensional; executing them produces the extensional form (the expansion).
row = {
'codeset_id': codeset_id,
'concept_id': '', # leave blank for now
# <non-palantir fields>
'code': code,
'codeSystem': code_system,
# </non-palantir fields>
'isExcluded': False,
'includeDescendants': True,
'includeMapped': False,
'item_id': str(uuid4()), # will let palantir verify ID is indeed unique
# TODO: @Stephanie: Is there any annotation we want at all?
# 'annotation': json.dumps({
# 'when': str(datetime.now().strftime('%Y-%m-%d')),
# 'who': 'Data Ingest & Harmonization (DIH)',
# 'project': 'N3C-enclave-import',
# 'oids-source': PARSE_ARGS.input_path,
# 'generation-process': get_runtime_provenance(),
# 'valueset-source': 'VSAC',
# }),
# 'annotation': '; '.join([
# 'when: ' + str(datetime.now().strftime('%Y-%m-%d')),
# 'who: ' + 'Data Ingest & Harmonization (DIH)',
# 'project: ' + 'N3C-enclave-import',
# 'oids-source: ' + PARSE_ARGS.input_path,
# 'generation-process: ' + get_runtime_provenance(),
# 'valueset-source: ' + 'VSAC',
# ]),
'annotation': '',
# 'created_by': 'DI&H Bulk Import',
'created_by': PALANTIR_ENCLAVE_USER_ID_1,
'created_at': _datetime_palantir_format()
}
row2 = {}
for k, v in row.items():
row2[k] = v.replace('\n', ' - ') if type(v) == str else v
row = row2
rows1.append(row)
df1 = pd.DataFrame(rows1)
_all[filename1] = df1
_save_csv(
df1, filename=filename1, source_name=source_name, field_delimiter=field_delimiter)
# 2. Palantir enclave table: code_sets
rows2 = []
for i, value_set in value_sets.iterrows():
codeset_id = oid__codeset_id_map[value_set['@ID']]
concept_set_name = format_label(value_set['@displayName'])
purposes = value_set['ns0:Purpose'].split('),')
purposes2 = []
for p in purposes:
i1 = 1 if p.startswith('(') else 0
i2 = -1 if p[len(p) - 1] == ')' else len(p)
purposes2.append(p[i1:i2])
code_system_codes = {}
code_systems = []
for concept in value_set['concepts']:
code = concept['@code']
code_system = concept['@codeSystemName']
if code_system not in code_system_codes:
code_system_codes[code_system] = []
if code_system not in code_systems:
code_systems.append(code_system)
code_system_codes[code_system].append(code)
# concept_set_name = concept_set_name + ' ' + '(' + ';'.join(code_systems) + ')'
row = {
# 'codeset_id': oid__codeset_id_map[value_set['@ID']],
# 'codeset_id': value_set['@displayName'],
'codeset_id': codeset_id,
'concept_set_name': concept_set_name,
'concept_set_version_title': concept_set_name + ' (v1)',
'project': PROJECT_NAME, # always use this project id for bulk import
'source_application': 'EXTERNAL VSAC',
'source_application_version': '', # nullable
'created_at': _datetime_palantir_format(),
'atlas_json': '', # nullable
'is_most_recent_version': True,
'version': 1,
'comments': 'Exported from VSAC and bulk imported to N3C.',
'intention': '; '.join(purposes2[0:3]), # nullable
'limitations': purposes2[3], # nullable
'issues': '', # nullable
'update_message': 'Initial version.', # nullable (maybe?)
# status field stats as appears in the code_set table 2022/01/12:
# 'status': [
# '', # null
# 'Finished',
# 'In Progress',
# 'Awaiting Review',
# 'In progress',
# ][2],
# status field doesn't show this in stats in code_set table, but UI uses this value by default:
'status': 'Under Construction',
'has_review': '', # boolean (nullable)
'reviewed_by': '', # nullable
'created_by': PALANTIR_ENCLAVE_USER_ID_1,
'authority': value_set['ns0:Source'],
'provenance': '; '.join([
'Steward: ' + value_set['ns0:Source'],
'OID: ' + value_set['@ID'],
'dih_id: ' + str(codeset_id),
'Code System(s): ' + ','.join(list(code_system_codes.keys())),
'Definition Type: ' + value_set['ns0:Type'],
'Definition Version: ' + value_set['@version'],
'Accessed: ' + str(datetime.now())[0:-7]
]),
'atlas_json_resource_url': '', # nullable
# null, initial version will not have the parent version so this field would be always null:
'parent_version_id': '', # nullable
# True ( after the import view it from the concept set editor to review the concept set and click done.
# We can add the comments like we imported from VSAC and reviewed it from the concept set editor. )
# 1. import 2. manual check 3 click done to finish the definition. - if we want to manually review them
# first and click Done:
'is_draft': True,
}
row2 = {}
for k, v in row.items():
row2[k] = v.replace('\n', ' - ') if type(v) == str else v
row = row2
rows2.append(row)
df2 = | pd.DataFrame(rows2) | pandas.DataFrame |
from .tdx_parser import TDXParser
import pandas as pd
import numpy as np
import json
from collections import deque
class Formula(object):
buy_kw = [r'买入', r'买', 'BUY', 'BUYIN', 'ENTERLONG']
sell_kw = [r'卖出', r'卖', 'SELL', 'SELLOUT', 'EXITLONG']
FIGURE_DATA_LEN = 200
def __init__(self, text, param_desc='', **kwargs):
self.figure_data_len = Formula.FIGURE_DATA_LEN
if 'json_len' in kwargs:
self.figure_data_len = kwargs['json_len']
self.text = text
self.param_desc = param_desc
parser = TDXParser()
self.ast = parser.parse(text)
self.const = {
'PERCENT_HLINE' : [0, 20, 50, 80, 100],
}
self._df = None
self.local = {}
self.buy = None
self.sell = None
self.builtin = {
'HHV': (self.HHV, r'%s的%s日内的最高价', 2),
'LLV': (self.LLV, r'%s的%s日内的最低价', 2),
'SUM': (self.SUM, r'%s的%s日数值之和', 2),
'REF': (self.REF, r'%s的%s日前的参考值', 2),
'CROSS': (self.CROSS, r'在%s上穿%s时触发', 2),
'NOT': (self.NOT, r'对%s取反逻辑', 1),
'IF' : (self.IF, r'IF(%s, %s, %s)', 3),
'IFF' : (self.IF, r'IFF(%s, %s, %s)', 3),
'EVERY' : (self.EVERY, r'%s周期内均满足%s', 2),
'EXIST' : (self.EXIST, r'EXIST(%s, %s)', 2),
'STD': (self.STD, r'%s周期%s的样本标准差', 2),
'VAR': (self.VAR, r'%s周期%s的样本方差', 2),
'STDP': (self.STDP, r'%s周期%s的总体标准差', 2),
'VARP': (self.VARP, r'%s周期%s的总体方差', 2),
'MAX': (self.MAX, r'取最大值(%s, %s)', 2),
'MIN': (self.MIN, r'取最小值(%s, %s)', 2),
'COUNT': (self.COUNT, r'满足条件%s在统计周期%s内的个数', 2),
'ABS': (self.ABS, r'%s的绝对值', 1),
'SQRT': (self.SQRT, r'%s的平方根', 1),
'POW': (self.POW, r'%s的%s次方', 2),
'LOG': (self.LOG, r'%s的对数', 1),
'CONST': (self.CONST, r'%s的最后值为常量', 1),
'INSIST': (self.INSIST, r'%s在周期%s到周期%s全为真', 3),
'LAST': (self.INSIST, r'%s在周期%s到周期%s全为真', 3),
'FILTER': (self.FILTER, r'过滤%s连续出现的%s个信号', 2),
'BARSLAST': (self.BARSLAST, r'满足条件%s到当前的周期数', 1),
'AVEDEV' : (self.AVEDEV, r'%s的周期%s的平均绝对偏差', 2),
'MA': (self.MA, r'%s的%s日简单移动平均', 2),
'EMA': (self.EMA, r'%s的%s日指数移动平均', 2),
'EXPEMA': (self.EMA, r'%s的%s日指数移动平均', 2),
'MEMA' : (self.MEMA, r'%s的%s周期平滑指数移动平均', 2),
'EXPMEMA' : (self.MEMA, r'%s的%s周期平滑指数移动平均', 2),
'DMA' : (self.DMA, r'%s的%s周期动态平均', 2),
'SMA' : (self.SMA, r'%s的%s周期(权重:%s)动态平均', 3),
'CONV': (self.CONV, r'%s与%s的%s周期卷积', 3),
'SAR' : (self.SAR, r'周期为%s步长为%s极值为%s的抛物转向', 3),
'SLOPE': (self.SLOPE, r'%s的周期为%s的线性回归斜率', 2),
'CLAMP': (self.CLAMP, r'限定%s的输出在(%s, %s)之间', 3),
'FORCAST': (self.FORCAST, r'%s的周期为%s的线性预测', 2),
'DRAWFUNC': (self.DRAWFUNC, r'DRAWFUNC(%s, %s, %s)', 3),
'DRAWICON': (self.DRAWICON, r'DRAWICON(%s, %s, %s)', 3),
'DRAWICONF': (self.DRAWICONF, r'DRAWICONF(%s, %s, %s)', 3),
'STICKLINE': (self.STICKLINE, r'STICKLINE(%s,%s,%s,%s,%s)', 5),
'DRAWKLINE' : (self.DRAWKLINE, r'DRAWKLINE(%s, %s, %s, %s)', 4),
'DRAWSKLINE' : (self.DRAWSKLINE, r'DRAWSKLINE(%s, %s, %s, %s)', 4),
'DRAWPKLINE' : (self.DRAWPKLINE, r'DRAWPKLINE(%s, %s, %s, %s, %s)', 5),
'DRAWNUMBER' : (self.DRAWNUMBER, r'DRAWNUMBER(%s, %s, %s)', 3),
'DRAWTEXT' : (self.DRAWTEXT, r'DRAWTEXT(%s, %s, %s)', 3),
'DRAWNULL' : (np.NaN, r'DRAWNULL', 0),
'DRAWGRID' : (self.DRAWGRID, r'DRAWGRID(%s)', 1),
'DRAWVOL' : (self.DRAWVOL, r'DRAWVOL(%s, %s)', 2),
'SIGNAL': (self.SIGNAL, r'从仓位数据%s导出指定的买入%s卖出%s信号指示',3),
'BASE': (self.BASE, r'创建%s的基准', 1),
'ASSET': (self.ASSET, r'根据价格%s,仓位%s和建仓比例%s创建资产', 3),
'SUGAR': (self.SUGAR, r'根据盈利情况进行调仓操作(仓位:%s, 价格:%s, 资产:%s)', 3),
'REMEMB': (self.REMEMB, r'记录价格(条件:%s, 值:%s)', 2),
'STOP':(self.STOP, r'止盈止损点(条件:%s, 价格:%s, 比例1:%s, 比例2:%s)', 4),
'HORIZON':(self.HORIZON, r'%s的周期为%s的地平线', 2)
}
def __mergeParam(self, param):
self.local = {}
#for k, v in self.const.items():
# self.local[k] = v
self.local.update(self.const)
if param is None:
return
#for k, v in param.items():
# self.local[k] = v
self.local.update(param)
def annotate(self, param = None):
"""
取注解, 返回文符串
"""
if self.ast is None:
print('This formula failed to be parsed.')
return None
self.__mergeParam(param)
return self.ast.annotate(self)
def paramDesc(self):
"""
取参数描述,返回参数名与描述
"""
if self.param_desc is None:
return ''
if self.param_desc == '':
return ''
return json.dumps(self.param_desc)
def get_figure(self):
"""
取绘图数据
在调用这个函数前,需要调用求值函数
调用求值函数,也可用于图表数据的刷新
"""
if self.ast is None:
print('This formula failed to be parsed.')
return None
return self.ast.figure(self)
def evaluate(self, param, fields = None, is_last_value = False):
"""
求值,设置股票代码,参数
param: 参数以dict形式给出,键名为参数的变量名
"""
if self.ast is None:
print('This formula failed to be parsed.')
raise ValueError('The formula failed to be parsed')
if isinstance(param, dict):
self.__mergeParam(param)
if isinstance(param, pd.DataFrame):
self._df = param
default_retval = self.ast.evaluate(self)
if default_retval is None:
raise ValueError('Failed to evaluate. The formula failed to be parsed.')
if fields is None:
return default_retval
retv = {}
for col in fields:
retv[col] = self.resolveSymbol(col, 0)
if not is_last_value:
return retv
last_values = np.array([v.iloc[-1] for k, v in retv.items()])
return last_values
def asDataFrame(self, columns, data_index = None):
if data_index is None:
close = self.resolveSymbol('CLOSE', 0)
if close is None or len(close) == 0:
return None
df = pd.DataFrame(index = close.index)
else:
df = pd.DataFrame(index = data_index)
for col in columns:
s = self.resolveSymbol(col, 0)
if s is not None:
df[col] = s
return df
def setSymbol(self, symbol, func_def):
old_def = None
if symbol in self.builtin:
old_def = self.builtin[symbol]
self.builtin[symbol] = func_def
return old_def
def resolveSymbol(self, symbol, n):
if n == 0:
if self._df is not None:
if symbol in self._df.columns:
return self._df[symbol]
if symbol in self.local:
variable = self.local[symbol]
if type(variable) == tuple:
variable = variable[0]
if variable is not None:
if hasattr(variable, '__call__'):
return variable()
return variable
symdef = None
if symbol in self.builtin:
symdef = self.builtin[symbol]
if n == symdef[2]:
return symdef[0]
if symdef is not None:
print('function: %s is resolved, expect %d parameters, but %d is given.' % (symbol, symdef[2], n))
if symbol in self.local:
funcdef = self.local[symbol]
if type(funcdef) == tuple:
if n == funcdef[2]:
func = funcdef[0]
else:
print('function: %s is resolved, expect %d parameters, but %d is given.' % (symbol, funcdef[2], n))
else:
func = funcdef
return func
return None
def resolveAnnotate(self, symbol):
if symbol in self.local:
variable = self.local[symbol]
if type(variable) != tuple:
return '[%s]' % symbol
return variable[1]
if symbol in self.builtin:
symdef = self.builtin[symbol]
return symdef[1]
return None
def registerFunctor(self, name, func, n):
self.builtin[name] = (func, 'DefineFunction', n)
############# 内部函数 #################
def addSymbol(self, symbol, value):
self.local[symbol] = value
if symbol in Formula.buy_kw:
if isinstance(value, pd.core.series.Series):
if value.dtype == bool:
self.buy = value
if symbol in Formula.sell_kw:
if isinstance(value, pd.core.series.Series):
if value.dtype == bool:
self.sell = value
### Formula Function Implementation ###
#引用函数
def HHV(self, param):
if param[1] == 0:
return pd.expanding_max(param[0])
return pd.rolling_max(param[0], param[1])
def LLV(self, param):
if param[1] == 0:
return pd.expanding_min(param[0])
return pd.rolling_min(param[0], param[1])
def REF(self, param):
return param[0].shift(param[1])
def EMA(self, param):
return pd.ewma(param[0], span=param[1], adjust = True)
def MA(self, param):
if param[1] == 0:
return pd.rolling_mean(param[0])
return pd.rolling_mean(param[0], param[1])
def SUM(self, param):
if param[1] == 0:
return pd.expanding_sum(param[0])
return pd.rolling_sum(param[0], param[1])
def CONST(self, param):
ret = pd.Series(index = param[0].index)
ret[:] = param[0][-1:].values[0]
return ret
def MEMA(self, param):
return pd.ewma(param[0], span=param[1] * 2 - 1, adjust = False)
def DMA(self, param):
df = pd.DataFrame(index = param[0].index)
df['X'] = param[0]
df['A'] = param[1]
class Averager:
def __init__(self):
self.Y = 0
self.start = True
def handleInput(self, row):
if self.start:
self.start = False
self.Y = row['X']
return self.Y
X = row['X']
A = row['A']
if A > 1:
A = 1
if A < 0:
A = 0
self.Y = A * X + (1 - A) * self.Y
return self.Y
avger = Averager()
result = df.apply(avger.handleInput, axis = 1, reduce = True)
return result
def SMA(self, param):
M = param[2]
if param[2] == 0:
M = 1
return pd.ewma(param[0], span = 2 * param[1] / M - 1)
def CONV(self, param):
df = pd.DataFrame(index = param[0].index)
df['X'] = param[0]
df['W'] = param[1]
class Convolution:
def __init__(self, N):
self.N = N
self.q = deque([], self.N)
self.tq = deque([], self.N)
self.s = 0
self.t = 0
def handleInput(self, row):
if len(self.q) < self.N:
if pd.isnull(row['W']) or pd.isnull(row['X']):
return np.NaN
self.q.append(row['W'] * row['X'])
self.tq.append(row['W'])
self.s += row['W'] * row['X']
self.t += row['W']
return np.NaN
ret = self.s / self.t
self.s -= self.q[0]
self.t -= self.tq[0]
delta_s = row['W'] * row['X']
delta_t = row['W']
self.s += delta_s
self.t += delta_t
self.q.append(delta_s)
self.tq.append(delta_t)
return ret
conv = Convolution(param[2])
result = df.apply(conv.handleInput, axis = 1, reduce = True)
return result
#算术逻辑函数
EPSLON = 0.0000001
def CROSS(self, param):
if not isinstance(param[0], pd.core.series.Series) and not isinstance(param[1], pd.core.series.Series):
print('Invalid data type is detected.')
return False
if not isinstance(param[0], pd.core.series.Series):
x1 = param[0]
x2 = param[0]
y1 = param[1].shift(1)
y2 = param[1]
if not isinstance(param[1], pd.core.series.Series):
x1 = param[0].shift(1)
x2 = param[0]
y1 = param[1]
y2 = param[1]
if isinstance(param[0], pd.core.series.Series) and isinstance(param[1], pd.core.series.Series):
x1 = param[0].shift(1)
x2 = param[0]
y1 = param[1].shift(1)
y2 = param[1]
return (x1 <= y1) & (x2 > y2)
def NOT(self, param):
if not isinstance(param[0], pd.core.series.Series):
if type(param[0]) != bool:
return (param[0] == 0)
else:
return not param[0]
if param[0].dtype == bool:
return (param[0] == False)
if param[0].dtype == float:
return (param[0] > -Formula.EPSLON) & (param[0] < Formula.EPSLON)
return (param[0] == 0)
def IF(self, param):
EPSLON = 0.0000001
if not isinstance(param[0], pd.core.series.Series):
if type(param[0]) == bool:
if param[0]:
return param[1]
else:
return param[2]
elif type(param[0]) == int:
if param[0] != 0:
return param[1]
else:
return param[2]
elif type(param[0]) == float:
if (param[0] < -Formula.EPSLON) or (param[0] > Formula.EPSLON):
return param[1]
else:
return param[2]
df = pd.DataFrame(index = param[0].index)
if param[0].dtype == bool:
df['X'] = param[0]
elif param[0].dtype == float:
df['X'] = ~ ((param[0] > -EPSLON) & (param[0] < EPSLON))
else:
df['X'] = (param[0] != 0)
df['A'] = param[1]
df['B'] = param[2]
def callback(row):
if row['X']:
return row['A']
else:
return row['B']
result = df.apply(callback, axis=1, reduce = True)
return result
def EVERY(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result == param[1]
def EXIST(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result > 0
def COUNT(self, param):
norm = self.IF([param[0], 1, 0])
result = pd.rolling_sum(norm, param[1])
return result
def INSIST(self, param):
norm = self.IF([param[0], 1, 0])
x1 = | pd.rolling_sum(norm, param[1]) | pandas.rolling_sum |
import logging
import uuid
from typing import Optional
import fire
import pandas as pd
import matplotlib.pyplot as plt
from core import Simulator, Simulation
from settings import *
from utils import list_dir
logger = logging.getLogger(__name__)
class Main:
@staticmethod
def show_strategies():
return "\n".join(f"* {strategy}" for strategy in SOCIAL_DISTANCING_VAR_STRATEGIES)
@staticmethod
def simulate(days=100, show=False, filename=""):
simulation = Simulation(days=days)
results_confirmed, results_interactions = simulation.run()
df = | pd.DataFrame(results_confirmed) | pandas.DataFrame |
from typing import Dict, Optional, Union, Callable, Tuple, List, Iterable
import pandas as pd
from scipy import stats
from .db import engine
import numpy as np
import seaborn as sns
import warnings
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
class Error(Exception):
"""Base class for exceptions in this module."""
pass
class InputError(Error):
"""Exception raised for errors in the input.
Attributes:
expression -- input expression in which the error occurred
message -- explanation of the error
"""
def __init__(self, expression, message):
self.expression = expression
self.message = message
# ==========================================================================
# Utility functions
# ==========================================================================
def calculate_timestep(
start_year: int, start_month: int, end_year: int, end_month: int
) -> int:
""" Utility function that converts a time range given a start date and end
date into a integer value.
Args:
start_year: The starting year (ex: 2012)
start_month: Starting month (1-12)
end_year: Ending year
end_month: Ending month
Returns:
The computed time step.
"""
assert start_year <= end_year, "Starting date cannot exceed ending date."
if start_year == end_year:
assert (
start_month <= end_month
), "Starting date cannot exceed ending date."
diff_year = end_year - start_year
year_to_month = diff_year * 12
return year_to_month - (start_month - 1) + (end_month - 1)
def get_data_value(
indicator: str,
country: Optional[str] = "South Sudan",
state: Optional[str] = None,
county: Optional[str] = None,
year: int = 2015,
month: int = 1,
unit: Optional[str] = None,
use_heuristic: bool = False,
) -> List[float]:
""" Get a indicator value from the delphi database.
Args:
indicator: Name of the target indicator variable.
country: Specified Country to get a value for.
state: Specified State to get value for.
year: Specified Year to get a value for.
month: Specified Month to get a value for.
unit: Specified Units to get a value for.
use_heuristic: a boolean that indicates whether or not use a built-in
heurstic for partially missing data. In cases where data for a given
year exists but no monthly data, setting this to true divides the
yearly value by 12 for any month.
Returns:
Specified float value given the specified parameters.
"""
query_base = " ".join(
[f"select * from indicator", f"where `Variable` like '{indicator}'"]
#[f"select Country, Year, Month, avg(Value) as Value, Unit from indicator", f"where `Variable` like '{indicator}'"]
)
query_parts = {"base": query_base}
if country is not None:
check_q = query_parts["base"] + f"and `Country` is '{country}'"
check_r = list(engine.execute(check_q))
if check_r == []:
warnings.warn(
f"Selected Country not found for {indicator}! Using default settings (South Sudan)!"
)
query_parts["country"] = f"and `Country` is 'South Sudan'"
else:
query_parts["country"] = f"and `Country` is '{country}'"
else:
query_parts["country"] = f"and `Country` is 'None'"
if state is not None:
check_q = query_parts["base"] + f"and `State` is '{state}'"
check_r = list(engine.execute(check_q))
if check_r == []:
warnings.warn(
(
f"Selected State not found for {indicator}! Using default ",
"settings (Getting data at Country level only instead of State ",
"level)!",
)
)
query_parts["state"] = f"and `State` is 'None'"
else:
query_parts["state"] = f"and `State` is '{state}'"
#else:
# query_parts["state"] = f"and `State` is 'None'"
if county is not None:
check_q = query_parts["base"] + f"and `County` is '{county}'"
check_r = list(engine.execute(check_q))
if check_r == []:
warnings.warn(
(
f"Selected County not found for {indicator}! Using default ",
"settings (Attempting to get data at state level instead)!",
)
)
query_parts["county"] = f"and `County` is 'None'"
else:
query_parts["county"] = f"and `County` is '{county}'"
#else:
# query_parts["county"] = f"and `County` is 'None'"
if unit is not None:
check_q = query_parts["base"] + f" and `Unit` is '{unit}'"
check_r = list(engine.execute(check_q))
if check_r == []:
warnings.warn(
f"Selected units not found for {indicator}! Falling back to default units!"
)
query_parts["unit"] = ""
else:
query_parts["unit"] = f"and `Unit` is '{unit}'"
query_parts["year"] = f"and `Year` is '{year}'"
query_parts["month"] = f"and `Month` is '{month}'"
#query_parts["groupby"] = f"group by `Year`, `Month`"
query = " ".join(query_parts.values())
#print(query)
results = list(engine.execute(query))
#print(results)
if results != []:
unit = sorted(list({r["Unit"] for r in results}))[0]
vals = [float(r["Value"]) for r in results if r["Unit"] == unit]
return vals
if not use_heuristic:
return []
query_parts["month"] = f"and `Month` is '0'"
query = " ".join(query_parts.values())
results = list(engine.execute(query))
if results != []:
unit = sorted(list({r["Unit"] for r in results}))[0]
vals = [float(r["Value"]) for r in results if r["Unit"] == unit]
return list(map(lambda x: x / 12, vals))
return []
# ==========================================================================
# Inference Output functions
# ==========================================================================
def data_to_list(
indicator: str,
start_year: int,
start_month: int,
end_year: int,
end_month: int,
use_heuristic: bool = False,
**kwargs,
) -> Tuple[List[str], List[List[int]]]:
""" Get the true values of the indicator variable given a start date and
end data. Allows for other specifications as well.
Args:
variable: Name of target indicator variable.
start_year: An integer, designates the starting year (ex: 2012).
start_month: An integer, starting month (1-12).
end_year: An integer, ending year.
end_month: An integer, ending month.
use_heuristic: a boolean that indicates whether or not use a built-in
heurstic for partially missing data. In cases where data for a given
year exists but no monthly data, setting this to true divides the
yearly value by 12 for any month.
**kwargs: These are options for which you can specify
country, state, units.
Returns:
Returns a tuple where the first element is a list of the specified
dates in year-month format and the second element is a
list of lists of the true data for a given indicator.
Each element of the outer list represents a time step and the inner
lists contain the data for that time point.
"""
country = kwargs.get("country", "South Sudan")
state = kwargs.get("state")
county = kwargs.get("county")
unit = kwargs.get("unit")
n_timesteps = calculate_timestep(
start_year, start_month, end_year, end_month
)
vals = []
year = start_year
month = start_month
date = []
for j in range(n_timesteps + 1):
vals.append(
get_data_value(
indicator,
country,
state,
county,
year,
month,
unit,
use_heuristic,
)
)
date.append(f"{year}-{month}")
if month == 12:
year = year + 1
month = 1
else:
month = month + 1
return date, vals
def mean_data_to_df(
indicator: str,
start_year: int,
start_month: int,
end_year: int,
end_month: int,
use_heuristic: bool = False,
ci: Optional[float] = None,
**kwargs,
) -> pd.DataFrame:
""" Get the true values of the indicator variable given a start date and
end data. Allows for other specifications as well.
variable: Name of target indicator variable.
start_year: An integer, designates the starting year (ex: 2012).
start_month: An integer, starting month (1-12).
end_year: An integer, ending year.
end_month: An integer, ending month.
use_heuristic: a boolean that indicates whether or not use a built-in
heurstic for partially missing data. In cases where data for a given
year exists but no monthly data, setting this to true divides the
yearly value by 12 for any month.
ci: confidence level. Only the mean is reported if left as None.
**kwargs: These are options for which you can specify
country, state, units.
Returns:
Pandas Dataframe containing true values for target node's indicator
variable. The values are indexed by date.
"""
date, vals = data_to_list(
indicator,
start_year,
start_month,
end_year,
end_month,
use_heuristic,
**kwargs,
)
if ci is not None:
data_mean = np.zeros((len(date), 3))
for i, obs in enumerate(vals):
if len(obs) > 1:
mean, _, _ = stats.bayes_mvs(obs, ci)
data_mean[i, 0] = mean[0]
data_mean[i, 1] = mean[1][0]
data_mean[i, 2] = mean[2][1]
else:
data_mean[i, 0] = np.mean(obs)
data_mean[i, 1] = np.nan
data_mean[i, 2] = np.nan
return pd.DataFrame(
data_mean,
date,
columns=[
f"{indicator}(True)",
f"{indicator}(True)(Lower Confidence Bound)",
f"{indicator}(True)(Upper Confidence Bound)",
],
)
else:
data_mean = np.zeros((len(date), 1))
for i, obs in enumerate(vals):
data_mean[i] = np.mean(obs)
return pd.DataFrame(data_mean, date, columns=[f"{indicator}(True)"])
def pred_to_array(
preds: Tuple[
Tuple[Tuple[int, int], Tuple[int, int]],
List[str],
List[List[Dict[str, Dict[str, float]]]],
],
indicator: str,
) -> np.ndarray:
""" Outputs raw predictions for a given indicator that were generated by
generate_prediction(). Each column is a time step and the rows are the
samples for that time step.
Args:
preds: This is the entire prediction set returned by the
generate_prediction() method in AnalysisGraph.cpp.
indicator: A string representing the indicator variable for which we
want predictions printed.
Returns:
np.ndarray
"""
_, pred_range, predictions = preds
time_range = len(pred_range)
m_samples = len(predictions)
pred_raw = np.zeros((m_samples, time_range))
for i in range(m_samples):
for j in range(time_range):
for _, inds in predictions[i][j].items():
if indicator in inds.keys():
pred_raw[i][j] = float(inds[indicator])
return pred_raw
def mean_pred_to_df(
preds: Tuple[
Tuple[Tuple[int, int], Tuple[int, int]],
List[str],
List[List[Dict[str, Dict[str, float]]]],
],
indicator: str,
ci: Optional[float] = 0.95,
true_vals: bool = False,
use_heuristic_for_true: bool = False,
**kwargs,
) -> pd.DataFrame:
""" Outputs mean predictions for a given indicator that were generated by
generate_prediction(). The rows are indexed by date. Other output includes
the confidence intervals for the mean predictions and with true_vals =
True, the true data values, residual error, and error bounds. Setting
true_vals = True, assumes that real data exists for the given prediction
range. A heuristic estimate is calculated for each missing data value in
the true dateset.
Args:
preds: This is the entire prediction set returned by the
generate_prediction() method in AnalysisGraph.cpp.
indicator: A string representing the indicator variable for which we
want mean predictions,etc printed.
ci: Confidence Level (as decimal). Default is 0.95 or 95%.
true_vals: A boolean, if set to True then the true data values,
residual errors, and error bounds are return in the dataframe. If set
to False (default), then only the mean predictions and confidence
intervals (for the mean predictions) are returned.
**kwargs: Here country, state, and units can be specified. The same
kwargs (excluding k), should have been passed to train_model.
Returns:
np.ndarray
"""
if ci is not None:
pred_raw = pred_to_array(preds, indicator)
_, pred_range, _ = preds
pred_stats = np.apply_along_axis(stats.bayes_mvs, 1, pred_raw.T, ci)[
:, 0
]
pred_mean = np.zeros((len(pred_range), 3))
for i, (mean, interval) in enumerate(pred_stats):
pred_mean[i, 0] = mean
pred_mean[i, 1] = interval[0]
pred_mean[i, 2] = interval[1]
mean_df = pd.DataFrame(
pred_mean,
columns=[
f"{indicator}(Mean Prediction)",
f"{indicator}(Lower Confidence Bound)",
f"{indicator}(Upper Confidence Bound)",
],
)
if true_vals == True:
warnings.warn(
"The selected output settings assume that real data exists "
"for the given prediction time range. Any missing data values are "
"filled with a heuristic estimate based on existing data."
)
start_date = pred_range[0]
start_year = int(start_date[0:4])
start_month = int(start_date[5:7])
end_date = pred_range[-1]
end_year = int(end_date[0:4])
end_month = int(end_date[5:7])
true_data_df = mean_data_to_df(
indicator,
start_year,
start_month,
end_year,
end_month,
use_heuristic_for_true,
ci,
**kwargs,
)
mean_df = mean_df.set_index(true_data_df.index)
error = mean_df.values - true_data_df.values[:, 0].reshape(-1, 1)
error_df = pd.DataFrame(
error,
columns=["Error", "Lower Error Bound", "Upper Error Bound"],
)
error_df = error_df.set_index(true_data_df.index)
return pd.concat(
[mean_df, true_data_df, error_df],
axis=1,
join_axes=[true_data_df.index],
)
else:
mean_df = mean_df.set_index(pd.Index(pred_range))
return mean_df
else:
pred_raw = pred_to_array(preds, indicator)
_, pred_range, _ = preds
pred_mean = np.mean(pred_raw, axis=0).reshape(-1, 1)
mean_df = pd.DataFrame(
pred_mean, columns=[f"{indicator}(Mean Prediction)"]
)
if true_vals == True:
warnings.warn(
"The selected output settings assume that real data exists "
"for the given prediction time range. Any missing data values are "
"filled with a heuristic estimate based on existing data."
)
start_date = pred_range[0]
start_year = int(start_date[0:4])
start_month = int(start_date[5:7])
end_date = pred_range[-1]
end_year = int(end_date[0:4])
end_month = int(end_date[5:7])
true_data_df = mean_data_to_df(
indicator,
start_year,
start_month,
end_year,
end_month,
use_heuristic_for_true,
ci,
**kwargs,
)
mean_df = mean_df.set_index(pd.Index(pred_range))
error = mean_df.values - true_data_df.values.reshape(-1, 1)
error_df = | pd.DataFrame(error, columns=["Error"]) | pandas.DataFrame |
# -*- coding: utf-8 -*-
"""
Analyzes code age in a git repository
Writes reports in the following locations
e.g. For repository "cpython"
[root] Defaults to ~/git.stats
├── cpython Directory for https://github.com/python/cpython.git
│ └── reports
│ ├── 2011-03-06.d68ed6fc.2_0 Revision `d68ed6fc` which was created on 2011-03-06 on
│ │ │ branch `2.0`.
│ │ └── __c.__cpp.__h Report on *.c, *.cpp and *.h files in this revision
│ │ ├── Guido_van_Rossum Sub-report on author `<NAME>`
│ │ │ ├── Guido_van_Rossum Sub-report on author `Guido van Rossum`
│ │ │ ├── code-age.txt List of commits in the peaks in the code-age.png graph
│ │ │ ├── details.csv LoC in each directory in for these files and authors
│ │ │ ├── newest-commits.txt List of newest commits for these files and authors
│ │ │ └── oldest-commits.txt List of oldest commits for these files and authors
"""
from __future__ import division, print_function
import subprocess
from subprocess import CalledProcessError
from collections import defaultdict, Counter
import sys
import time
import re
import os
import stat
import glob
import errno
import numpy as np
from scipy import signal
import pandas as pd
from pandas import Series, DataFrame, Timestamp
import matplotlib
import matplotlib.pylab as plt
from matplotlib.pylab import cycler
import bz2
from multiprocessing import Pool, cpu_count
from multiprocessing.pool import ThreadPool
import pygments
from pygments import lex
from pygments.token import Text, Comment, Punctuation, Literal
from pygments.lexers import guess_lexer_for_filename
# Python 2 / 3 stuff
PY2 = sys.version_info[0] < 3
try:
import cPickle as pickle
except ImportError:
import pickle
try:
reload(sys)
sys.setdefaultencoding('utf-8')
except:
pass
#
# Configuration.
#
CACHE_FILE_VERSION = 3 # Update when making incompatible changes to cache file format
TIMEZONE = 'Australia/Melbourne' # The timezone used for all commit times. TODO Make configurable
SHA_LEN = 8 # The number of characters used when displaying git SHA-1 hashes
STRICT_CHECKING = False # For validating code.
N_BLAME_PROCESSES = max(1, cpu_count() - 1) # Number of processes to use for blaming
N_SHOW_THREADS = 8 # Number of threads for running the many git show commands
DO_MULTIPROCESSING = True # For test non-threaded performance
# Set graphing style
matplotlib.style.use('ggplot')
plt.rcParams['axes.prop_cycle'] = cycler('color', ['b', 'y', 'k', '#707040', '#404070'])
plt.rcParams['savefig.dpi'] = 300
PATH_MAX = 255
# Files that we don't analyze. These are files that don't have lines of code so that blaming
# doesn't make sense.
IGNORED_EXTS = {
'.air', '.bin', '.bmp', '.cer', '.cert', '.der', '.developerprofile', '.dll', '.doc', '.docx',
'.exe', '.gif', '.icns', '.ico', '.jar', '.jpeg', '.jpg', '.keychain', '.launch', '.pdf',
'.pem', '.pfx', '.png', '.prn', '.so', '.spc', '.svg', '.swf', '.tif', '.tiff', '.xls', '.xlsx',
'.tar', '.zip', '.gz', '.7z', '.rar',
'.patch',
'.dump',
'.h5'
}
def _is_windows():
"""Returns: True if running on a MS-Windows operating system."""
try:
sys.getwindowsversion()
except:
return False
else:
return True
IS_WINDOWS = _is_windows()
if IS_WINDOWS:
import win32api
import win32process
import win32con
def lowpriority():
""" Set the priority of the process to below-normal.
http://stackoverflow.com/questions/1023038/change-process-priority-in-python-cross-platform
"""
pid = win32api.GetCurrentProcessId()
handle = win32api.OpenProcess(win32con.PROCESS_ALL_ACCESS, True, pid)
win32process.SetPriorityClass(handle, win32process.BELOW_NORMAL_PRIORITY_CLASS)
else:
def lowpriority():
os.nice(1)
class ProcessPool(object):
"""Package of Pool and ThreadPool for 'with' usage.
"""
SINGLE = 0
THREAD = 1
PROCESS = 2
def __init__(self, process_type, n_pool):
if not DO_MULTIPROCESSING:
process_type = ProcessPool.SINGLE
self.process_type = process_type
if process_type != ProcessPool.SINGLE:
clazz = ThreadPool if process_type == ProcessPool.THREAD else Pool
self.pool = clazz(n_pool)
def __enter__(self):
return self
def imap_unordered(self, func, args_iter):
if self.process_type != ProcessPool.SINGLE:
return self.pool.imap_unordered(func, args_iter)
else:
return map(func, args_iter)
def __exit__(self, exc_type, exc_value, traceback):
if self.process_type != ProcessPool.SINGLE:
self.pool.terminate()
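# Illustrative sketch (comments only, not executed): ProcessPool is intended to be
# used as a context manager so the worker pool is always terminated. `blame_task`
# and `task_args` below are placeholder names, not part of this module.
#
#   with ProcessPool(ProcessPool.PROCESS, N_BLAME_PROCESSES) as pool:
#       for result in pool.imap_unordered(blame_task, task_args):
#           pass  # results arrive in completion order, not submission order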
def sha_str(sha):
"""The way we show git SHA-1 hashes in reports."""
return sha[:SHA_LEN]
def date_str(date):
"""The way we show dates in reports."""
return date.strftime('%Y-%m-%d')
DAY = pd.Timedelta('1 days') # 24 * 3600 * 1e9 in pandas nanosec time
# Max date accepted for commits. Clearly a sanity check
MAX_DATE = Timestamp('today').tz_localize(TIMEZONE) + DAY
def to_timestamp(date_s):
"""Convert string `date_s' to pandas Timestamp in `TIMEZONE`
NOTE: The idea is to get all times in one timezone.
"""
return Timestamp(date_s).tz_convert(TIMEZONE)
def delta_days(t0, t1):
"""Returns: time from `t0` to `t1' in days where t0 and t1 are Timestamps
Returned value is signed (+ve if t1 later than t0) and fractional
"""
return (t1 - t0).total_seconds() / 3600 / 24
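# Illustrative sketch (not executed): to_timestamp() normalizes everything to
# TIMEZONE, and delta_days() is signed and fractional. The dates below are made up.
#
#   t0 = to_timestamp('2015-01-01 00:00:00 +0000')
#   t1 = to_timestamp('2015-01-02 12:00:00 +0000')
#   delta_days(t0, t1)   # -> 1.5
#   delta_days(t1, t0)   # -> -1.5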
concat = ''.join
path_join = os.path.join
def decode_to_str(bytes):
"""Decode byte list `bytes` to a unicode string trying utf-8 encoding first then latin-1.
"""
if bytes is None:
return None
try:
return bytes.decode('utf-8')
except:
return bytes.decode('latin-1')
def save_object(path, obj):
"""Save object `obj` to `path` after bzipping it
"""
# existing_pkl is for recovering from bad pickles
existing_pkl = '%s.old.pkl' % path
if os.path.exists(path) and not os.path.exists(existing_pkl):
os.rename(path, existing_pkl)
with bz2.BZ2File(path, 'w') as f:
# protocol=2 makes pickle usable by python 2.x
pickle.dump(obj, f, protocol=2)
# Delete existing_pkl if load_object succeeds
load_object(path)
if os.path.exists(path) and os.path.exists(existing_pkl):
os.remove(existing_pkl)
def load_object(path, default=None):
"""Load object from `path`
"""
if default is not None and not os.path.exists(path):
return default
try:
with bz2.BZ2File(path, 'r') as f:
return pickle.load(f)
except:
print('load_object(%s, %s) failed' % (path, default), file=sys.stderr)
raise
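# Illustrative sketch (not executed): save_object()/load_object() form a
# bzip2-compressed pickle round trip. The paths below are placeholders.
#
#   save_object('/tmp/example.pkl', {'a': 1})
#   load_object('/tmp/example.pkl')               # -> {'a': 1}
#   load_object('/tmp/missing.pkl', default={})   # -> {} when the file does not exist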
def mkdir(path):
"""Create directory `path` including all intermediate-level directories and ignore
"already exists" errors.
"""
try:
os.makedirs(path)
except OSError as e:
if not (e.errno == errno.EEXIST and os.path.isdir(path)):
raise
def df_append_totals(df_in):
"""Append row and column totals to Pandas DataFrame `df_in`, remove all zero columns and sort
rows and columns by total.
"""
assert 'Total' not in df_in.index
assert 'Total' not in df_in.columns
rows, columns = list(df_in.index), list(df_in.columns)
df = DataFrame(index=rows + ['Total'], columns=columns + ['Total'])
df.iloc[:-1, :-1] = df_in
df.iloc[:, -1] = df.iloc[:-1, :-1].sum(axis=1)
df.iloc[-1, :] = df.iloc[:-1, :].sum(axis=0)
row_order = ['Total'] + sorted(rows, key=lambda r: -df.loc[r, 'Total'])
column_order = ['Total'] + sorted(columns, key=lambda c: -df.loc['Total', c])
df = df.reindex_axis(row_order, axis=0)
df = df.reindex_axis(column_order, axis=1)
empties = [col for col in df.columns if df.loc['Total', col] == 0]
df.drop(empties, axis=1, inplace=True)
return df
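# Illustrative sketch (not executed): given a small author x extension LoC table
# (names below are made up), df_append_totals() adds a 'Total' row and column and
# sorts both axes by the totals.
#
#   df = DataFrame({'.py': [10, 0], '.c': [5, 20]}, index=['alice', 'bob'])
#   df_append_totals(df)
#   #         Total  .c  .py
#   #  Total     35  25   10
#   #  bob       20  20    0
#   #  alice     15   5   10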
def moving_average(series, window):
"""Returns: Weighted moving average of pandas Series `series` as a pandas Series.
Weights are a triangle of width `window`.
NOTE: If window is greater than the number of items in series then smoothing may not work
well. See first few lines of function code.
"""
if len(series) < 10:
return series
window = min(window, len(series))
    weights = np.empty(window, dtype=float)
radius = (window - 1) / 2
for i in range(window):
weights[i] = radius + 1 - abs(i - radius)
ma = np.convolve(series, weights, mode='same')
assert ma.size == series.size, ([ma.size, ma.dtype], [series.size, series.dtype], window)
sum_raw = series.sum()
sum_ma = ma.sum()
if sum_ma:
ma *= sum_raw / sum_ma
return Series(ma, index=series.index)
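# Illustrative sketch (not executed): the triangular window smooths the series while
# the final rescaling preserves the series total.
#
#   s = Series(np.ones(30))
#   ma = moving_average(s, 5)
#   ma.sum()   # ~= s.sum() == 30.0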
def procrustes(s, width=100):
"""Returns: String `s` fitted `width` or fewer chars, removing middle characters if necessary.
"""
width = max(20, width)
if len(s) > width:
notch = int(round(width * 0.6)) - 5
end = width - 5 - notch
return '%s ... %s' % (s[:notch], s[-end:])
return s
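# Illustrative sketch (not executed): long paths are elided in the middle so they
# fit in report columns.
#
#   procrustes('a' * 50, width=20)   # -> 'aaaaaaa ... aaaaaaaa' (exactly 20 chars)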
RE_EXT = re.compile(r'^\.\w+$')
RE_EXT_NUMBER = re.compile(r'^\.\d+$')
def get_ext(path):
"""Returns: extension of file `path`
"""
parts = os.path.splitext(path)
if not parts:
ext = '[None]'
else:
ext = parts[-1]
if not RE_EXT.search(ext) or RE_EXT_NUMBER.search(ext):
ext = ''
return ext
def exec_output(command, require_output):
"""Executes `command` which is a list of strings. If `require_output` is True then raise an
    exception if there is no stdout.
Returns: The stdout of the child process as a string.
"""
# TODO save stderr and print it on error
try:
output = subprocess.check_output(command)
except:
print('exec_output failed: command=%s' % ' '.join(command), file=sys.stderr)
raise
if require_output and not output:
raise RuntimeError('exec_output: command=%s' % command)
return decode_to_str(output)
def exec_output_lines(command, require_output, sep=None):
"""Executes `command` which is a list of strings. If `require_output` is True then raise an
    exception if there is no stdout.
Returns: The stdout of the child process as a list of strings, one string per line.
"""
if sep is not None:
return exec_output(command, require_output).split(sep)
else:
return exec_output(command, require_output).splitlines()
def exec_headline(command):
"""Execute `command` which is a list of strings.
Returns: The first line stdout of the child process.
"""
return exec_output(command, True).splitlines()[0]
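# Illustrative sketch (not executed): the exec_* helpers are thin wrappers around
# subprocess.check_output, used for git commands such as the ones below.
#
#   exec_headline(['git', 'rev-parse', 'HEAD'])                  # first line of stdout
#   exec_output_lines(['git', 'status', '--porcelain'], False)   # stdout as a list of lines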
def git_file_list(path_patterns=()):
"""Returns: List of files in current git revision matching `path_patterns`.
This is basically git ls-files.
"""
# git ls-files -z returns a '\0' separated list of files terminated with '\0\0'
    bin_list = exec_output_lines(['git', 'ls-files', '-z', '--exclude-standard'] + list(path_patterns),
False, '\0')
file_list = []
for path in bin_list:
if not path:
break
file_list.append(path)
return file_list
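# Illustrative sketch (not executed): path_patterns are passed straight through to
# `git ls-files`, so ordinary git pathspecs work. The patterns below are placeholders.
#
#   git_file_list(['*.py'])             # tracked *.py files in the current revision
#   git_file_list(['src', 'include'])   # tracked files under these directories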
def git_pending_list(path_patterns=()):
"""Returns: List of git pending files matching `path_patterns`.
"""
    return exec_output_lines(['git', 'diff', '--name-only'] + list(path_patterns), False)
def git_file_list_no_pending(path_patterns=()):
"""Returns: List of non-pending files in current git revision matching `path_patterns`.
"""
file_list = git_file_list(path_patterns)
pending = set(git_pending_list(path_patterns))
return [path for path in file_list if path not in pending]
def git_diff(rev1, rev2):
"""Returns: List of files that differ in git revisions `rev1` and `rev2`.
"""
return exec_output_lines(['git', 'diff', '--name-only', rev1, rev2], False)
def git_show_oneline(obj):
"""Returns: One-line description of a git object `obj`, which is typically a commit.
https://git-scm.com/docs/git-show
"""
return exec_headline(['git', 'show', '--oneline', '--quiet', obj])
def git_date(obj):
"""Returns: Date of a git object `obj`, which is typically a commit.
NOTE: The returned date is standardized to timezone TIMEZONE.
"""
date_s = exec_headline(['git', 'show', '--pretty=format:%ai', '--quiet', obj])
return to_timestamp(date_s)
RE_REMOTE_URL = re.compile(r'(https?://.*/[^/]+(?:\.git)?)\s+\(fetch\)')
RE_REMOTE_NAME = re.compile(r'https?://.*/(.+?)(\.git)?$')
def git_remote():
"""Returns: The remote URL and a short name for the current repository.
"""
# $ git remote -v
# origin https://github.com/FFTW/fftw3.git (fetch)
# origin https://github.com/FFTW/fftw3.git (push)
try:
output_lines = exec_output_lines(['git', 'remote', '-v'], True)
except Exception as e:
print('git_remote error: %s' % e)
return 'unknown', 'unknown'
for line in output_lines:
m = RE_REMOTE_URL.search(line)
if not m:
continue
remote_url = m.group(1)
remote_name = RE_REMOTE_NAME.search(remote_url).group(1)
return remote_url, remote_name
raise RuntimeError('No remote')
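# Illustrative sketch (not executed): for a checkout whose `git remote -v` output
# contains the line
#     origin  https://github.com/FFTW/fftw3.git (fetch)
# git_remote() returns ('https://github.com/FFTW/fftw3.git', 'fftw3').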
def git_describe():
"""Returns: git describe of current revision.
"""
return exec_headline(['git', 'describe', '--always'])
def git_name():
"""Returns: git name of current revision.
"""
return ' '.join(exec_headline(['git', 'name-rev', 'HEAD']).split()[1:])
def git_current_branch():
"""Returns: git name of current branch or None if there is no current branch (detached HEAD).
"""
branch = exec_headline(['git', 'rev-parse', '--abbrev-ref', 'HEAD'])
if branch == 'HEAD': # Detached HEAD?
branch = None
return branch
def git_current_revision():
"""Returns: SHA-1 of current revision.
"""
return exec_headline(['git', 'rev-parse', 'HEAD'])
def git_revision_description():
"""Returns: Our best guess at describing the current revision"""
description = git_current_branch()
if not description:
description = git_describe()
return description
RE_PATH = re.compile(r'''[^a-z^0-9^!@#$\-+=_\[\]\{\}\(\)^\x7f-\xffff]''', re.IGNORECASE)
RE_SLASH = re.compile(r'[\\/]+')
def normalize_path(path):
"""Returns: `path` without leading ./ and trailing / . \ is replaced by /
"""
path = RE_SLASH.sub('/', path)
if path.startswith('./'):
path = path[2:]
if path.endswith('/'):
path = path[:-1]
return path
def clean_path(path):
"""Returns: `path` with characters that are illegal in filenames replaced with '_'
"""
return RE_PATH.sub('_', normalize_path(path))
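# Illustrative sketch (not executed): normalize_path() canonicalizes separators while
# clean_path() also replaces characters that are unsafe in file names.
#
#   normalize_path('.\\reports\\2016\\')   # -> 'reports/2016'
#   clean_path('a b/c')                    # -> 'a_b_c'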
def git_blame_text(path):
"""Returns: git blame text for file `path`
"""
if PY2:
path = path.encode(sys.getfilesystemencoding())
return exec_output(['git', 'blame', '-l', '-f', '-w', '-M', path], False)
RE_BLAME = re.compile(r'''
\^*([0-9a-f]{4,})\s+
.+?\s+
\(
(.+?)\s+
(\d{4}-\d{2}-\d{2}\s+\d{2}:\d{2}:\d{2}\s+[+-]\d{4})
\s+(\d+)
\)''',
re.DOTALL | re.MULTILINE | re.VERBOSE)
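# Illustrative sketch (not executed): a (made-up, abbreviated) `git blame -l -f -w -M`
# line and the groups RE_BLAME extracts from it.
#
#   line = ('d68ed6fc Objects/object.c '
#           '(Guido van Rossum 2011-03-06 10:11:12 +1100   123) return NULL;')
#   m = RE_BLAME.match(line)
#   m.group(1), m.group(2), m.group(4)   # -> ('d68ed6fc', 'Guido van Rossum', '123')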
def _debug_check_dates(max_date, sha_date_author, path_sha_loc):
"""Debug code to validate dates in `sha_date_author`, `path_sha_loc`
"""
if not STRICT_CHECKING:
return
assert max_date <= MAX_DATE, max_date
for path, sha_loc in path_sha_loc.items():
for sha, loc in sha_loc.items():
if loc <= 1:
continue
assert sha in sha_date_author, '%s not in sha_date_author' % [sha, path]
date, _ = sha_date_author[sha]
assert date <= max_date, ('date > max_date', sha, loc, [date, max_date], path)
class GitException(Exception):
def __init__(self, msg=None):
super(GitException, self).__init__(msg)
self.git_msg = msg
if IS_WINDOWS:
RE_LINE = re.compile(r'(?:\r\n|\n)+')
else:
RE_LINE = re.compile(r'[\n]+')
def _parse_blame(max_date, text, path):
"""Parses git blame output `text` and extracts LoC for each git hash found
max_date: Latest valid date for a commit
text: A string containing the git blame output of file `path`
path: Path of blamed file. Used only for constructing error messages in this function
Returns: line_sha_date_author {line_n: (sha, date, author)} over all lines in the file
"""
line_sha_date_author = {}
lines = RE_LINE.split(text)
while lines and not lines[-1]:
lines.pop()
if not lines:
raise GitException('is empty')
for i, ln in enumerate(lines):
if not ln:
continue
m = RE_BLAME.match(ln)
if not m:
raise GitException('bad line')
if m.group(2) == 'Not Committed Yet':
continue
sha = m.group(1)
author = m.group(2)
date_s = m.group(3)
line_n = int(m.group(4))
author = author.strip()
if author == '':
author = '<>'
assert line_n == i + 1, 'line_n=%d,i=%d\n%s\n%s' % (line_n, i, path, m.group(0))
assert author.strip() == author, 'author="%s\n%s:%d\n%s",' % (
author, path, i + 1, ln[:200])
date = to_timestamp(date_s)
if date > max_date:
raise GitException('bad date. sha=%s,date=%s' % (sha, date))
line_sha_date_author[line_n] = sha, date, author
if not line_sha_date_author:
raise GitException('is empty')
return line_sha_date_author
def _compute_author_sha_loc(line_sha_date_author, sloc_lines):
"""
line_sha_date_author: {line_n: (sha, date, author)}
sloc_lines: {line_n} for line_n in line_sha_date_author that are source code or None
Returns: sha_date_author, sha_aloc, sha_sloc
sha_date_author: {sha: (date, author)} over all SHA-1 hashes in
`line_sha_date_author`
sha_aloc: {sha: aloc} over all SHA-1 hashes found in `line_sha_date_author`. aloc
is "all lines of code"
sha_sloc: {sha: sloc} over SHA-1 hashes found in `line_sha_date_author` that are in
`sloc_lines`. sloc is "source lines of code". If sloc_lines is None then
sha_sloc is None.
"""
sha_date_author = {}
sha_aloc = Counter()
sha_sloc = None
for sha, date, author in line_sha_date_author.values():
sha_aloc[sha] += 1
sha_date_author[sha] = (date, author)
if not sha_aloc:
raise GitException('is empty')
if sloc_lines is not None:
sha_sloc = Counter()
counted_lines = set(line_sha_date_author.keys()) & sloc_lines
for line_n in counted_lines:
sha, _, _ = line_sha_date_author[line_n]
sha_sloc[sha] += 1
return sha_date_author, sha_aloc, sha_sloc
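# Illustrative sketch (not executed): two blamed lines from one commit, only the first
# of which is source code. The hash, date and author below are placeholders.
#
#   lsda = {1: ('abcd1234', date1, 'alice'),
#           2: ('abcd1234', date1, 'alice')}
#   _compute_author_sha_loc(lsda, sloc_lines={1})
#   # -> ({'abcd1234': (date1, 'alice')},   sha_date_author
#   #     Counter({'abcd1234': 2}),         sha_aloc: all lines
#   #     Counter({'abcd1234': 1}))         sha_sloc: source lines only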
def get_ignored_files(gitstatsignore):
if gitstatsignore is None:
gitstatsignore = 'gitstatsignore'
else:
        assert os.path.exists(gitstatsignore), 'gitstatsignore file "%s" not found' % gitstatsignore
if not gitstatsignore or not os.path.exists(gitstatsignore):
return set()
ignored_files = set()
with open(gitstatsignore, 'rt') as f:
for line in f:
line = line.strip('\n').strip()
if not line:
continue
ignored_files.update(git_file_list([line]))
return ignored_files
class Persistable(object):
"""Base class that
a) saves to disk,
catalog: a dict of objects
summary: a dict describing catalog
manifest: a dict of sizes of objects in a catalog
b) loads them from disk
Derived classes must contain a dict data member called `TEMPLATE` that gives the keys of the
data members to save / load and default constructors for each key.
"""
@staticmethod
def make_data_dir(path):
return path_join(path, 'data')
@staticmethod
def update_dict(base_dict, new_dict):
for k, v in new_dict.items():
if k in base_dict:
base_dict[k].update(v)
else:
base_dict[k] = v
return base_dict
def _make_path(self, name):
return path_join(self.base_dir, name)
def __init__(self, summary, base_dir):
"""Initialize the data based on TEMPLATE and set summary to `summary`.
summary: A dict giving a summary of the data to be saved
base_dir: Directory that summary, data and manifest are to be saved to
"""
assert 'TEMPLATE' in self.__class__.__dict__, 'No TEMPLATE in %s' % self.__class__.__dict__
self.base_dir = base_dir
self.data_dir = Persistable.make_data_dir(base_dir)
self.summary = summary.copy()
self.catalog = {k: v() for k, v in self.__class__.TEMPLATE.items()}
for k, v in self.catalog.items():
assert hasattr(v, 'update'), '%s.TEMPLATE[%s] does not have update(). type=%s' % (
self.__class__.__name__, k, type(v))
def _load_catalog(self):
catalog = load_object(self._make_path('data.pkl'), {})
if catalog.get('CACHE_FILE_VERSION', 0) != CACHE_FILE_VERSION:
return {}
del catalog['CACHE_FILE_VERSION']
return catalog
def load(self):
catalog = self._load_catalog()
if not catalog:
return False
Persistable.update_dict(self.catalog, catalog) # !@#$ Use toolz
path = self._make_path('summary')
if os.path.exists(path):
self.summary = eval(open(path, 'rt').read())
return True
def save(self):
# Load before saving in case another instance of this script is running
path = self._make_path('data.pkl')
if os.path.exists(path):
catalog = self._load_catalog()
self.catalog = Persistable.update_dict(catalog, self.catalog)
# Save the data, summary and manifest
mkdir(self.base_dir)
self.catalog['CACHE_FILE_VERSION'] = CACHE_FILE_VERSION
save_object(path, self.catalog)
open(self._make_path('summary'), 'wt').write(repr(self.summary))
manifest = {k: len(v) for k, v in self.catalog.items() if k != 'CACHE_FILE_VERSION'}
manifest['CACHE_FILE_VERSION'] = CACHE_FILE_VERSION
open(self._make_path('manifest'), 'wt').write(repr(manifest))
def __repr__(self):
return repr([self.base_dir, {k: len(v) for k, v in self.catalog.items()}])
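# Illustrative sketch (not executed): a minimal Persistable subclass only needs a
# TEMPLATE of default constructors; its keys become the entries of self.catalog.
# ExampleState and the paths below are placeholders, not part of this module.
#
#   class ExampleState(Persistable):
#       TEMPLATE = {'path_loc': lambda: {}}
#
#   state = ExampleState({'description': 'demo'}, '/tmp/example.dir')
#   state.catalog['path_loc']['a.py'] = 100
#   state.save()   # writes data.pkl, summary and manifest under /tmp/example.dir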
class BlameRepoState(Persistable):
"""Repository level persisted data structures
Currently this is just sha_date_author.
"""
TEMPLATE = {'sha_date_author': lambda: {}}
class BlameRevState(Persistable):
"""Revision level persisted data structures
The main structures are path_sha_aloc and path_sha_sloc.
"""
TEMPLATE = {
'path_sha_aloc': lambda: {}, # aloc for all LoC
'path_sha_sloc': lambda: {}, # sloc for source LoC
'path_set': lambda: set(),
'bad_path_set': lambda: set(),
}
def get_lexer(path, text):
try:
return guess_lexer_for_filename(path, text[:1000])
except pygments.util.ClassNotFound:
return None
COMMENTS = {
    Comment,
    Literal.String.Doc,
    Comment.Multiline,
}
class LineState(object):
def __init__(self):
self.sloc = set()
self.tokens = []
self.lnum = 0
self.ltype = None
def tokens_to_lines(self, has_eol):
        is_comment = self.ltype in COMMENTS
is_blank = self.ltype == Text
text = concat(self.tokens)
if has_eol:
lines = text[:-1].split('\n')
else:
            lines = text.split('\n')
for line in lines:
self.lnum += 1
if not (is_comment or is_blank):
self.sloc.add(self.lnum)
self.tokens = []
self.ltype = None
def get_sloc_lines(path):
"""Determine the lines in file `path` that are source code. i.e. Not space or comments.
This requires a parser for this file type, which exists for most source code files in the
Pygment module.
Returns: If a Pygment lexer can be found for file `path`
{line_n} i.e. set of 1-offset line number for lines in `path` that are source.
Otherwise None
"""
with open(path, 'rb') as f:
text = decode_to_str(f.read())
lexer = get_lexer(path, text)
if lexer is None:
return None
line_state = LineState()
for ttype, value in lex(text, lexer):
if not line_state.ltype:
line_state.ltype = ttype
elif line_state.ltype == Text:
if ttype is not None:
line_state.ltype = ttype
elif line_state.ltype == Punctuation:
if ttype is not None and ttype != Text:
line_state.ltype = ttype
if value:
line_state.tokens.append(value)
if value.endswith('\n'):
line_state.tokens_to_lines(True)
return line_state.sloc
def _task_extract_author_sha_loc(args):
"""Wrapper around blame and comment detection code to allow it to be executed by a
multiprocessing Pool.
Runs git blame and parses output to extract LoC by sha for all the sha's (SHA-1 hashes) in
the blame output.
Runs a Pygment lexer over the file if there is a matching lexer and combines this with the
blame parse to extract SLoC for each git hash found
args: max_date, path
max_date: Latest valid date for a commit
path: path of file to analyze
    Returns: path, sha_date_author, sha_aloc, sha_sloc, exception
path: from `args`
sha_date_author: {sha: (date, author)} over all SHA-1 hashes found in `path`
sha_aloc: {sha: aloc} over all SHA-1 hashes found in `path`. aloc = all lines counts
sha_sloc: {sha: sloc} over all SHA-1 hashes found in `path`. sloc = source lines counts
"""
max_date, path = args
sha_date_author, sha_aloc, sha_sloc, exception = None, None, None, None
try:
text = git_blame_text(path)
line_sha_date_author = _parse_blame(max_date, text, path)
sloc_lines = get_sloc_lines(path)
sha_date_author, sha_aloc, sha_sloc = _compute_author_sha_loc(line_sha_date_author,
sloc_lines)
except Exception as e:
exception = e
if not DO_MULTIPROCESSING and not isinstance(e, (GitException, CalledProcessError,
IsADirectoryError)):
print('_task_extract_author_sha_loc: %s: %s' % (type(e), e), file=sys.stderr)
raise
return path, sha_date_author, sha_aloc, sha_sloc, exception
class BlameState(object):
"""A BlameState contains data from `git blame` that are used to compute reports.
This data can take a long time to generate so we allow it to be saved to and loaded from
disk so that it can be reused between runs.
Data members: (All are read-only)
repo_dir
sha_date_author
path_sha_aloc
path_sha_sloc
path_set
bad_path_set
Typical usage:
blame_state.load() # Load existing data from disk
changed = blame_state.update_data(file_set) # Blame files in file_set to update data
if changed:
blame_state.save() # Save updated data to disk
Internal members: 'repo_dir', '_repo_state', '_rev_state', '_repo_base_dir'
Disk storage
------------
<repo_base_dir> Defaults to ~/git.stats/<repository name>
└── cache
├── 241d0c54 Data for revision 241d0c54
│ ├── data.pkl The data in a bzipped pickle.
│ ├── manifest Python file with dict of data keys and lengths
│ └── summary Python file with dict of summary date
...
├
├── e7a3e5c4 Data for revision e7a3e5c4
│ ├── data.pkl
│ ├── manifest
│ └── summary
├── data.pkl Repository level data
├── manifest
└── summary
"""
def _debug_check(self):
"""Debugging code to check consistency of the data in a BlameState
"""
if not STRICT_CHECKING:
return
assert 'path_sha_aloc' in self._rev_state.catalog
path_sha_loc = self.path_sha_aloc
path_set = self.path_set
for path, sha_loc in path_sha_loc.items():
assert path in path_set, '%s not in self.path_set' % path
for sha, loc in sha_loc.items():
date, author = self.sha_date_author[sha]
if set(self.path_sha_aloc.keys()) | self.bad_path_set != self.path_set:
for path in sorted((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set)[:20]:
print('!@!', path in self.path_sha_aloc, path in self.bad_path_set)
assert set(self.path_sha_aloc.keys()) | self.bad_path_set == self.path_set, (
'path sets wrong %d %d\n'
'(path_sha_aloc | bad_path_set) - path_set: %s\n'
'path_set - (path_sha_aloc | bad_path_set): %s\n' % (
len((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set),
len(self.path_set - (set(self.path_sha_aloc.keys()) | self.bad_path_set)),
sorted((set(self.path_sha_aloc.keys()) | self.bad_path_set) - self.path_set)[:20],
sorted(self.path_set - (set(self.path_sha_aloc.keys()) | self.bad_path_set))[:20]
))
def __init__(self, repo_base_dir, repo_summary, rev_summary):
"""
repo_base_dir: Root of data saved for this repository. This is <repo_dir> in the storage
diagram. Typically ~/git.stats/<repository name>
repo_summary = {
'remote_url': remote_url,
'remote_name': remote_name,
}
rev_summary = {
'revision_sha': revision_sha,
'branch': git_current_branch(),
'description': description,
'name': git_name(),
'date': revision_date,
}
"""
self._repo_base_dir = repo_base_dir
self._repo_dir = path_join(repo_base_dir, 'cache')
self._repo_state = BlameRepoState(repo_summary, self.repo_dir)
rev_dir = path_join(self._repo_state.base_dir,
sha_str(rev_summary['revision_sha']))
self._rev_state = BlameRevState(rev_summary, rev_dir)
def copy(self, rev_dir):
"""Returns: A copy of self with its rev_dir member replaced by `rev_dir`
"""
blame_state = BlameState(self._repo_base_dir, self._repo_state.summary,
self._rev_state.summary)
blame_state._rev_state.base_dir = rev_dir
return blame_state
def load(self, max_date):
"""Loads a previously saved copy of it data from disk.
Returns: self
"""
valid_repo = self._repo_state.load()
valid_rev = self._rev_state.load()
if STRICT_CHECKING:
if max_date is not None:
_debug_check_dates(max_date, self.sha_date_author, self.path_sha_aloc)
assert 'path_sha_aloc' in self._rev_state.catalog, self._rev_state.catalog.keys()
assert 'path_sha_sloc' in self._rev_state.catalog, self._rev_state.catalog.keys()
self._debug_check()
return self, valid_repo and valid_rev
def save(self):
"""Saves a copy of its data to disk
"""
self._repo_state.save()
self._rev_state.save()
if STRICT_CHECKING:
self.load(None)
def __repr__(self):
return repr({k: repr(v) for k, v in self.__dict__.items()})
@property
def repo_dir(self):
"""Returns top directory for this repo's cached data.
Typically ~/git.stats/<repository name>/cache
"""
return self._repo_dir
@property
def sha_date_author(self):
"""Returns: {sha: (date, author)} for all commits that have been found in blaming this
repository. sha is SHA-1 hash of commit
This is a per-repository dict.
"""
return self._repo_state.catalog['sha_date_author']
@property
def path_sha_aloc(self):
"""Returns: {path: [(sha, loc)]} for all files that have been found in blaming this
revision of this repository.
path_sha_loc[path] is a list of (sha, loc) where sha = SHA-1 hash of commit and
loc = lines of code from that commit in file `path`
This is a per-revision dict.
"""
return self._rev_state.catalog['path_sha_aloc']
@property
def path_sha_sloc(self):
"""Returns: {path: [(sha, loc)]} for all files that have been found in blaming this
revision of this repository.
path_sha_sloc[path] is a list of (sha, loc) where sha = SHA-1 hash of commit and
loc = lines of code from that commit in file `path`
This is a per-revision dict.
"""
return self._rev_state.catalog['path_sha_sloc']
@property
def path_set(self):
"""Returns: set of paths of files that have been attempted to blame in this revision.
This is a per-revision dict.
"""
return self._rev_state.catalog['path_set']
@property
def bad_path_set(self):
"""Returns: set of paths of files that have been unsuccessfully attempted to blame in this
revision.
bad_path_set ∪ path_sha_aloc.keys() == path_set
This is a per-revision dict.
"""
return self._rev_state.catalog['bad_path_set']
def _get_peer_revisions(self):
"""Returns: data dicts of all revisions that have been blamed for this repository except
this revision.
"""
peer_dirs = (rev_dir for rev_dir in glob.glob(path_join(self.repo_dir, '*'))
if rev_dir != self._rev_state.base_dir and
os.path.exists(path_join(rev_dir, 'data.pkl')))
for rev_dir in peer_dirs:
rev, valid = self.copy(rev_dir).load(None)
if valid:
yield rev_dir, rev
def _update_from_existing(self, file_set):
"""Updates state of this repository / revision with data saved from blaming earlier
revisions in this repository.
"""
assert isinstance(file_set, set), type(file_set)
# remaining_path_set = files in `file_set` that we haven't yet loaded
remaining_path_set = file_set - self.path_set
print('-' * 80)
print('Update data from previous blames. %d remaining of %d files' % (
len(remaining_path_set), len(file_set)))
if not remaining_path_set:
return
peer_revisions = list(self._get_peer_revisions())
print('Checking up to %d peer revisions for blame data' % len(peer_revisions))
this_sha = self._rev_state.summary['revision_sha']
this_date = self._rev_state.summary['date']
peer_revisions.sort(key=lambda dir_rev: dir_rev[1]._rev_state.summary['date'])
for i, (that_dir, that_rev) in enumerate(peer_revisions):
if not remaining_path_set:
break
print('%2d: %s,' % (i, that_dir), end=' ')
that_date = that_rev._rev_state.summary['date']
sign = '> ' if that_date > this_date else '<='
print('%s %s %s' % (date_str(that_date), sign, date_str(this_date)), end=' ')
# This is important. git diff can report 2 versions of a file as being identical while
# git blame reports different commits for 1 or more lines in the file
# In these cases we use the older commits.
if that_date > this_date:
print('later')
continue
that_sha = that_rev._rev_state.summary['revision_sha']
that_path_set = that_rev.path_set
that_bad_path_set = that_rev.bad_path_set
try:
diff_set = set(git_diff(this_sha, that_sha))
except subprocess.CalledProcessError:
print('git_diff(%s, %s) failed. Has someone deleted an old revision %s?' % (
this_sha, that_sha, that_sha), file=sys.stderr)
continue
self.bad_path_set.update(that_bad_path_set - diff_set)
# existing_path_set: files in remaining_path_set that we already have data for
existing_path_set = remaining_path_set & (that_path_set - diff_set)
for path in existing_path_set:
if path in that_rev.path_sha_aloc:
self.path_sha_aloc[path] = that_rev.path_sha_aloc[path]
if STRICT_CHECKING:
for sha in self.path_sha_aloc[path].keys():
assert sha in self.sha_date_author, '\n%s\nthis=%s\nthat=%s' % (
(sha, path),
self._rev_state.summary, that_rev._rev_state.summary)
if path in that_rev.path_sha_sloc:
self.path_sha_sloc[path] = that_rev.path_sha_sloc[path]
self.path_set.add(path)
remaining_path_set.remove(path)
print('%d files of %d remaining, diff=%d, used=%d' % (
len(remaining_path_set), len(file_set), len(diff_set), len(existing_path_set)))
print()
bad_path_set = self.bad_path_set
bad_path_set -= set(self.path_sha_aloc.keys())
path_set = self.path_set
path_set |= set(self.path_sha_aloc.keys()) | bad_path_set
self._debug_check()
def _update_new_files(self, file_set, force):
"""Computes base statistics over whole revision
Blames all files in `file_set`
Updates: sha_date_author, path_sha_loc for files that are not already in path_sha_loc
"""
rev_summary = self._rev_state.summary
sha_date_author = self.sha_date_author
if not force:
file_set = file_set - self.path_set
n_files = len(file_set)
print('-' * 80)
print('Update data by blaming %d files' % len(file_set))
loc0 = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
commits0 = len(sha_date_author)
start = time.time()
blamed = 0
last_loc = loc0
last_i = 0
for path in file_set:
self.path_set.add(path)
if os.path.basename(path) in {'.gitignore'}:
self.bad_path_set.add(path)
max_date = rev_summary['date']
args_list = [(max_date, path) for path in file_set]
args_list.sort(key=lambda dip: -os.path.getsize(dip[1]))
with ProcessPool(ProcessPool.PROCESS, N_BLAME_PROCESSES) as pool:
for i, (path, h_d_a, sha_loc, sha_sloc, e) in enumerate(
pool.imap_unordered(_task_extract_author_sha_loc, args_list)):
if e is not None:
apath = os.path.abspath(path)
self.bad_path_set.add(path)
if isinstance(e, GitException):
if e.git_msg:
if e.git_msg != 'is empty':
print(' %s %s' % (apath, e.git_msg), file=sys.stderr)
else:
print(' %s cannot be blamed' % apath, file=sys.stderr)
continue
elif isinstance(e, (subprocess.CalledProcessError, IsADirectoryError)):
if not os.path.exists(path):
print(' %s no longer exists' % apath, file=sys.stderr)
continue
elif os.path.isdir(path):
print(' %s is a directory' % apath, file=sys.stderr)
continue
elif stat.S_IXUSR & os.stat(path)[stat.ST_MODE]:
print(' %s is an executable. e=%s' % (apath, e), file=sys.stderr)
continue
raise
else:
if path in self.bad_path_set:
self.bad_path_set.remove(path)
self.sha_date_author.update(h_d_a)
self.path_sha_aloc[path] = sha_loc
if sha_sloc:
self.path_sha_sloc[path] = sha_sloc
if i - last_i >= 100:
duration = time.time() - start
loc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
if loc != last_loc:
print('%d of %d files (%.1f%%), %d bad, %d LoC, %d commits, %.1f secs, %s' %
(i, n_files, 100 * i / n_files, i - blamed, loc - loc0,
len(sha_date_author) - commits0, duration,
procrustes(path, width=65)))
sys.stdout.flush()
last_loc = loc
last_i = i
blamed += 1
if STRICT_CHECKING:
_debug_check_dates(max_date, sha_date_author, self.path_sha_aloc)
assert path in self.path_sha_aloc, os.path.abspath(path)
assert self.path_sha_aloc[path], os.path.abspath(path)
assert sum(self.path_sha_aloc[path].values()), os.path.abspath(path)
if STRICT_CHECKING:
for path in file_set - self.bad_path_set:
if os.path.basename(path) in {'.gitignore'}:
continue
assert path in self.path_sha_aloc, os.path.abspath(path)
print('~' * 80)
duration = time.time() - start
aloc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_aloc.values())
sloc = sum(sum(sha_loc.values()) for sha_loc in self.path_sha_sloc.values())
print('%d files, %d blamed, %d lines, %d source lines, %d commits, %.1f seconds' % (
    len(file_set), blamed, aloc, sloc, len(sha_date_author), duration))
def update_data(self, file_set, force):
"""Updates blame state for this revision for this repository over files in 'file_set'
If `force` is True then blame all files in file_set, otherwise try to re-use as much
existing blame data as possible.
Updates:
repository: sha_date_author
revision: path_sha_aloc, path_sha_sloc, file_set, bad_path_set
"""
assert isinstance(file_set, set), type(file_set)
n_paths0 = len(self.path_set)
print('^' * 80)
print('Update data for %d files. Previously %d files for this revision' % (
len(file_set), n_paths0))
self._debug_check()
if not force:
self._update_from_existing(file_set)
self._debug_check()
self._update_new_files(file_set, force)
self._debug_check()
if STRICT_CHECKING:
for path in set(file_set) - self.bad_path_set:
assert path in self.path_sha_aloc, path
return len(self.path_set) > n_paths0
def _filter_strings(str_iter, pattern):
"""Returns: Subset of strings in iterable `str_iter` that match regular expression `pattern`.
"""
if pattern is None:
return None
assert isinstance(str_iter, set), type(str_iter)
regex = re.compile(pattern, re.IGNORECASE)
return {s for s in str_iter if regex.search(s)}
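# A minimal usage sketch for _filter_strings; the path names below are made up
# purely for illustration.
def _example_filter_strings():
    """Sketch: keep only the paths whose name matches a case-insensitive pattern."""
    paths = {'src/main.py', 'src/util.py', 'README.md', 'docs/Main.rst'}
    assert _filter_strings(paths, r'main') == {'src/main.py', 'docs/Main.rst'}
    # A pattern of None means "no filtering requested".
    assert _filter_strings(paths, None) is None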
def filter_path_sha_loc(blame_state, path_sha_loc, file_set=None, author_set=None):
""" blame_state: BlameState of revision
path_sha_loc: {path: {sha: loc}} over all files in revisions
file_set: files to filter on or None
author_set: authors to filter on or None
Returns: dict of items in `path_sha_loc` that match files in `file_set` and authors in
`author_set`.
NOTE: Does NOT modify path_sha_loc
"""
assert file_set is None or isinstance(file_set, set), type(file_set)
assert author_set is None or isinstance(author_set, set), type(author_set)
if file_set:
path_sha_loc = {path: sha_loc
for path, sha_loc in path_sha_loc.items()
if path in file_set}
if STRICT_CHECKING:
for path in path_sha_loc:
assert path_sha_loc[path], path
assert sum(path_sha_loc[path].values()), path
if author_set:
sha_set = {sha for sha, (_, author) in blame_state.sha_date_author.items()
if author in author_set}
path_sha_loc = {path: {sha: loc for sha, loc in sha_loc.items()
if sha in sha_set}
for path, sha_loc in path_sha_loc.items()}
path_sha_loc = {path: sha_loc
for path, sha_loc in path_sha_loc.items() if sha_loc}
if STRICT_CHECKING:
for path in path_sha_loc:
assert path_sha_loc[path], path
assert sum(path_sha_loc[path].values()), path
return path_sha_loc
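# A minimal sketch of filtering a {path: {sha: loc}} mapping by file only.
# blame_state is consulted only when author_set is given, so None suffices here;
# the paths, SHA-1 prefixes and counts are invented.
def _example_filter_path_sha_loc():
    path_sha_loc = {
        'a.py': {'241d0c54': 10, 'e7a3e5c4': 2},
        'b.md': {'e7a3e5c4': 5},
    }
    filtered = filter_path_sha_loc(None, path_sha_loc, file_set={'a.py'})
    assert set(filtered.keys()) == {'a.py'}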
def _task_show_oneline(sha):
"""Wrapper around git_show_oneline() to allow it to be executed by a multiprocessing Pool.
"""
text, exception = None, None
try:
text = git_show_oneline(sha)
except Exception as e:
exception = e
if not DO_MULTIPROCESSING:
raise
return sha, text, exception
def parallel_show_oneline(sha_iter):
"""Run git_show_oneline() on SHA-1 hashes in `sha_iter` in parallel
sha_iter: Iterable for some SHA-1 hashes
Returns: {sha: text} over SHA-1 hashes sha in `sha_iter`. text is git_show_oneline output
"""
sha_text = {}
exception = None
with ProcessPool(ProcessPool.THREAD, N_SHOW_THREADS) as pool:
for sha, text, e in pool.imap_unordered(_task_show_oneline, sha_iter):
if e:
exception = e
break
sha_text[sha] = text
if exception:
raise exception
return sha_text
def make_sha_path_loc(path_sha_loc):
"""Make a re-organized version of `path_sha_loc`
path_sha_loc: {path: {sha: loc}}
Returns: {sha: {path: loc}}
"""
sha_path_loc = defaultdict(dict)
for path, sha_loc in path_sha_loc.items():
for sha, loc in sha_loc.items():
sha_path_loc[sha][path] = loc
return sha_path_loc
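# Small sketch: make_sha_path_loc simply inverts the nesting of the blame
# mapping. SHA-1 prefixes and line counts are invented.
def _example_make_sha_path_loc():
    path_sha_loc = {'a.py': {'241d0c54': 10},
                    'b.py': {'241d0c54': 3, 'e7a3e5c4': 1}}
    sha_path_loc = make_sha_path_loc(path_sha_loc)
    assert sha_path_loc['241d0c54'] == {'a.py': 10, 'b.py': 3}
    assert sha_path_loc['e7a3e5c4'] == {'b.py': 1}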
def make_report_name(default_name, components):
if not components:
return default_name
elif len(components) == 1:
return list(components)[0]
else:
return ('(%s)' % ','.join(sorted(components))) if components else default_name
def make_report_dir(base_dir, default_name, components, max_len=None):
if max_len is None:
max_len = PATH_MAX
name = '.'.join(clean_path(cpt) for cpt in sorted(components)) if components else default_name
return path_join(base_dir, name)[:max_len]
def get_totals(path_sha_loc):
"""Returns: total numbers of files, commits, LoC for files in `path_sha_loc`
"""
all_commits = set()
total_loc = 0
for sha_loc in path_sha_loc.values():
all_commits.update(sha_loc.keys())
total_loc += sum(sha_loc.values())
return len(path_sha_loc), len(all_commits), total_loc
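# Small sketch: get_totals reduces the blame mapping to
# (number of files, number of distinct commits, total LoC). Values invented.
def _example_get_totals():
    path_sha_loc = {'a.py': {'241d0c54': 10, 'e7a3e5c4': 2},
                    'b.md': {'e7a3e5c4': 5}}
    assert get_totals(path_sha_loc) == (2, 2, 17)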
def write_manifest(blame_state, path_sha_loc, report_dir, name_description, title):
"""Write a README file in `report_dir`
"""
total_files, total_commits, total_loc = get_totals(path_sha_loc)
totals = 'Totals: %d files, %d commits, %d LoC' % (total_files, total_commits, total_loc)
repo_summary = blame_state._repo_state.summary
rev_summary = blame_state._rev_state.summary
details = 'Revision Details\n'\
'----------------\n'\
'Repository: %s (%s)\n'\
'Date: %s\n'\
'Description: %s\n'\
'SHA-1 hash %s\n' % (
repo_summary['remote_name'], repo_summary['remote_url'],
date_str(rev_summary['date']),
rev_summary['description'],
rev_summary['revision_sha'])
with open(path_join(report_dir, 'README'), 'wt') as f:
def put(s=''):
f.write('%s\n' % s)
put(title)
put('=' * len(title))
put()
put(totals)
put()
if details:
put(details)
put()
put('Files in this Directory')
put('-----------------------')
for name, description in sorted(name_description.items()):
put('%-12s: %s' % (name, description))
def compute_tables(blame_state, path_sha_loc):
"""Compute summary tables over whole report showing number of files and LoC by author and
file extension.
blame_state: BlameState of revision
path_sha_loc: {path: {sha: loc}} over files being reported
Returns: df_author_ext_files, df_author_ext_loc where
df_author_ext_files: DataFrame of file counts
df_author_ext_loc: DataFrame of LoC counts
with both having rows of authors and columns of file extensions.
"""
sha_date_author = blame_state.sha_date_author
exts = sorted({get_ext(path) for path in path_sha_loc.keys()})
authors = sorted({author for _, author in sha_date_author.values()})
assert '.patch' not in exts
assert '.dump' not in exts
author_ext_files = np.zeros((len(authors), len(exts)), dtype=np.int64)
author_ext_loc = np.zeros((len(authors), len(exts)), dtype=np.int64)
author_index = {author: i for i, author in enumerate(authors)}
ext_index = {ext: i for i, ext in enumerate(exts)}
if STRICT_CHECKING:
for path, v in path_sha_loc.items():
assert sum(v.values()), (path, len(v))
for i, e in enumerate(exts):
assert '--' not in e, e
assert '.' not in e[1:], e
for path, sha_loc in path_sha_loc.items():
ext = get_ext(path)
for sha, loc in sha_loc.items():
_, author = sha_date_author[sha]
a = author_index[author]
e = ext_index[ext]
author_ext_files[a, e] += 1
author_ext_loc[a, e] += loc
df_author_ext_files = DataFrame(author_ext_files, index=authors, columns=exts)
df_author_ext_loc = DataFrame(author_ext_loc, index=authors, columns=exts)
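# Illustrative sketch of compute_tables on toy data. Only sha_date_author is
# read from the blame state, so a bare namespace stands in for it here; names,
# SHA-1 prefixes and counts are invented, and get_ext() is assumed to return
# the extension with its leading dot (e.g. '.py').
def _example_compute_tables():
    from types import SimpleNamespace
    toy_state = SimpleNamespace(sha_date_author={
        '241d0c54': ('2016-01-01', 'alice'),
        'e7a3e5c4': ('2016-02-01', 'bob'),
    })
    path_sha_loc = {'a.py': {'241d0c54': 10}, 'b.md': {'e7a3e5c4': 5}}
    df_files, df_loc = compute_tables(toy_state, path_sha_loc)
    # Rows are authors, columns are file extensions.
    assert df_loc.loc['alice', '.py'] == 10
    assert df_files.loc['bob', '.md'] == 1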
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import numpy as np
import pandas as pd
_MESSAGE_X_NONE = "Must supply X"
_MESSAGE_Y_NONE = "Must supply y"
_MESSAGE_X_Y_ROWS = "X and y must have same number of rows"
_MESSAGE_X_SENSITIVE_ROWS = "X and the sensitive features must have same number of rows"
_KW_SENSITIVE_FEATURES = "sensitive_features"
def _validate_and_reformat_reductions_input(X, y, enforce_binary_sensitive_feature=False,
**kwargs):
if X is None:
raise ValueError(_MESSAGE_X_NONE)
if y is None:
raise ValueError(_MESSAGE_Y_NONE)
if _KW_SENSITIVE_FEATURES not in kwargs:
msg = "Must specify {0} (for now)".format(_KW_SENSITIVE_FEATURES)
raise RuntimeError(msg)
# Extract the target attribute
sensitive_features_vector = _make_vector(kwargs[_KW_SENSITIVE_FEATURES],
_KW_SENSITIVE_FEATURES)
if enforce_binary_sensitive_feature:
unique_labels = np.unique(sensitive_features_vector)
if len(unique_labels) > 2:
raise RuntimeError("Sensitive features contain more than two unique values")
# Extract the Y values
y_vector = _make_vector(y, "y")
X_rows, _ = _get_matrix_shape(X, "X")
if X_rows != y_vector.shape[0]:
raise RuntimeError(_MESSAGE_X_Y_ROWS)
if X_rows != sensitive_features_vector.shape[0]:
raise RuntimeError(_MESSAGE_X_SENSITIVE_ROWS)
return pd.DataFrame(X), y_vector, sensitive_features_vector
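# Minimal sketch of the expected calling convention, assuming the module's
# remaining helpers (e.g. _get_matrix_shape) behave as their names suggest.
# The feature matrix, labels and sensitive-feature values below are invented.
def _example_validate_and_reformat():
    X = np.array([[0.0, 1.0], [1.0, 0.0], [0.5, 0.5]])
    y = [0, 1, 1]
    sf = ["a", "b", "a"]
    X_df, y_vec, sf_vec = _validate_and_reformat_reductions_input(
        X, y, enforce_binary_sensitive_feature=True, sensitive_features=sf)
    assert isinstance(X_df, pd.DataFrame)
    assert len(y_vec) == len(sf_vec) == 3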
def _make_vector(formless, formless_name):
formed_vector = None
if isinstance(formless, list):
formed_vector = pd.Series(formless)
elif isinstance(formless, pd.DataFrame):
if len(formless.columns) == 1:
formed_vector = formless.iloc[:, 0]
else:
msgfmt = "{0} is a DataFrame with more than one column"
raise RuntimeError(msgfmt.format(formless_name))
elif isinstance(formless, pd.Series):
formed_vector = formless
elif isinstance(formless, np.ndarray):
if len(formless.shape) == 1:
formed_vector = pd.Series(formless)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas
from pandas.compat import string_types
from pandas.core.dtypes.cast import find_common_type
from pandas.core.dtypes.common import (
is_list_like,
is_numeric_dtype,
is_datetime_or_timedelta_dtype,
)
from pandas.core.index import ensure_index
from pandas.core.base import DataError
from modin.engines.base.frame.partition_manager import BaseFrameManager
from modin.error_message import ErrorMessage
from modin.backends.base.query_compiler import BaseQueryCompiler
class PandasQueryCompiler(BaseQueryCompiler):
"""This class implements the logic necessary for operating on partitions
with a Pandas backend. This logic is specific to Pandas."""
def __init__(
self, block_partitions_object, index, columns, dtypes=None, is_transposed=False
):
assert isinstance(block_partitions_object, BaseFrameManager)
self.data = block_partitions_object
self.index = index
self.columns = columns
if dtypes is not None:
self._dtype_cache = dtypes
self._is_transposed = int(is_transposed)
# Index, columns and dtypes objects
_dtype_cache = None
def _get_dtype(self):
if self._dtype_cache is None:
def dtype_builder(df):
return df.apply(lambda row: find_common_type(row.values), axis=0)
map_func = self._prepare_method(
self._build_mapreduce_func(lambda df: df.dtypes)
)
reduce_func = self._build_mapreduce_func(dtype_builder)
# For now we will use a pandas Series for the dtypes.
if len(self.columns) > 0:
self._dtype_cache = (
self._full_reduce(0, map_func, reduce_func).to_pandas().iloc[0]
)
else:
self._dtype_cache = pandas.Series([])
# reset name to None because we use "__reduced__" internally
self._dtype_cache.name = None
return self._dtype_cache
dtypes = property(_get_dtype)
def compute_index(self, axis, data_object, compute_diff=True):
"""Computes the index after a number of rows have been removed.
Note: In order for this to be used properly, the indexes must not be
changed before you compute this.
Args:
axis: The axis to extract the index from.
data_object: The new data object to extract the index from.
compute_diff: True to use `self` to compute the index from self
rather than data_object. This is used when the dimension of the
index may have changed, but the deleted rows/columns are
unknown.
Returns:
A new pandas.Index object.
"""
def pandas_index_extraction(df, axis):
if not axis:
return df.index
else:
try:
return df.columns
except AttributeError:
return pandas.Index([])
index_obj = self.index if not axis else self.columns
old_blocks = self.data if compute_diff else None
new_indices = data_object.get_indices(
axis=axis,
index_func=lambda df: pandas_index_extraction(df, axis),
old_blocks=old_blocks,
)
return index_obj[new_indices] if compute_diff else new_indices
def _validate_set_axis(self, new_labels, old_labels):
new_labels = ensure_index(new_labels)
old_len = len(old_labels)
new_len = len(new_labels)
if old_len != new_len:
raise ValueError(
"Length mismatch: Expected axis has %d elements, "
"new values have %d elements" % (old_len, new_len)
)
return new_labels
_index_cache = None
_columns_cache = None
def _get_index(self):
return self._index_cache
def _get_columns(self):
return self._columns_cache
def _set_index(self, new_index):
if self._index_cache is None:
self._index_cache = ensure_index(new_index)
else:
new_index = self._validate_set_axis(new_index, self._index_cache)
self._index_cache = new_index
def _set_columns(self, new_columns):
if self._columns_cache is None:
self._columns_cache = ensure_index(new_columns)
else:
new_columns = self._validate_set_axis(new_columns, self._columns_cache)
self._columns_cache = new_columns
columns = property(_get_columns, _set_columns)
index = property(_get_index, _set_index)
# END Index, columns, and dtypes objects
# Internal methods
# These methods are for building the correct answer in a modular way.
# Please be careful when changing these!
def _prepare_method(self, pandas_func, **kwargs):
"""Prepares methods given various metadata.
Args:
pandas_func: The function to prepare.
Returns
Helper function which handles potential transpose.
"""
if self._is_transposed:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(
df.T, internal_indices=internal_indices, **kwargs
)
return pandas_func(df.T, **kwargs)
else:
def helper(df, internal_indices=[]):
if len(internal_indices) > 0:
return pandas_func(df, internal_indices=internal_indices, **kwargs)
return pandas_func(df, **kwargs)
return helper
def numeric_columns(self, include_bool=True):
"""Returns the numeric columns of the Manager.
Returns:
List of index names.
"""
columns = []
for col, dtype in zip(self.columns, self.dtypes):
if is_numeric_dtype(dtype) and (
include_bool or (not include_bool and dtype != np.bool_)
):
columns.append(col)
return columns
def numeric_function_clean_dataframe(self, axis):
"""Preprocesses numeric functions to clean dataframe and pick numeric indices.
Args:
axis: '0' if columns and '1' if rows.
Returns:
Tuple with return value(if any), indices to apply func to & cleaned Manager.
"""
result = None
query_compiler = self
# If no numeric columns and over columns, then return empty Series
if not axis and len(self.index) == 0:
result = pandas.Series(dtype=np.int64)
nonnumeric = [
col
for col, dtype in zip(self.columns, self.dtypes)
if not is_numeric_dtype(dtype)
]
if len(nonnumeric) == len(self.columns):
# If over rows and no numeric columns, return this
if axis:
result = pandas.Series([np.nan for _ in self.index])
else:
result = pandas.Series([0 for _ in self.index])
else:
query_compiler = self.drop(columns=nonnumeric)
return result, query_compiler
# END Internal methods
# Metadata modification methods
def add_prefix(self, prefix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(prefix) + str(x))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(prefix) + str(x))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
def add_suffix(self, suffix, axis=1):
if axis == 1:
new_columns = self.columns.map(lambda x: str(x) + str(suffix))
if self._dtype_cache is not None:
new_dtype_cache = self._dtype_cache.copy()
new_dtype_cache.index = new_columns
else:
new_dtype_cache = None
new_index = self.index
else:
new_index = self.index.map(lambda x: str(x) + str(suffix))
new_columns = self.columns
new_dtype_cache = self._dtype_cache
return self.__constructor__(
self.data, new_index, new_columns, new_dtype_cache, self._is_transposed
)
# END Metadata modification methods
# Copy
# For copy, we don't want a situation where we modify the metadata of the
# copies if we end up modifying something here. We copy all of the metadata
# to prevent that.
def copy(self):
return self.__constructor__(
self.data.copy(),
self.index.copy(),
self.columns.copy(),
self._dtype_cache,
self._is_transposed,
)
# END Copy
# Append/Concat/Join (Not Merge)
# The append/concat/join operations should ideally never trigger remote
# compute. These operations should only ever be manipulations of the
# metadata of the resulting object. It should just be a simple matter of
# appending the other object's blocks and adding np.nan columns for the new
# columns, if needed. If new columns are added, some compute may be
# required, though it can be delayed.
#
# Currently this computation is not delayed, and it may make a copy of the
# DataFrame in memory. This can be problematic and should be fixed in the
# future. TODO (devin-petersohn): Delay reindexing
def _join_index_objects(self, axis, other_index, how, sort=True):
"""Joins a pair of index objects (columns or rows) by a given strategy.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other_index: The other_index to join on.
how: The type of join to join to make (e.g. right, left).
Returns:
Joined indices.
"""
if isinstance(other_index, list):
joined_obj = self.columns if not axis else self.index
# TODO: revisit for performance
for obj in other_index:
joined_obj = joined_obj.join(obj, how=how)
return joined_obj
if not axis:
return self.columns.join(other_index, how=how, sort=sort)
else:
return self.index.join(other_index, how=how, sort=sort)
def join(self, other, **kwargs):
"""Joins a list or two objects together.
Args:
other: The other object(s) to join on.
Returns:
Joined objects.
"""
if not isinstance(other, list):
other = [other]
return self._join_list_of_managers(other, **kwargs)
def concat(self, axis, other, **kwargs):
"""Concatenates two objects together.
Args:
axis: The axis index object to join (0 for columns, 1 for index).
other: The other_index to concat with.
Returns:
Concatenated objects.
"""
return self._append_list_of_managers(other, axis, **kwargs)
def _append_list_of_managers(self, others, axis, **kwargs):
if not isinstance(others, list):
others = [others]
if self._is_transposed:
# If others are transposed, we handle that behavior correctly in
# `copartition`, but it is not handled correctly in the case that `self` is
# transposed.
return (
self.transpose()
._append_list_of_managers(
[o.transpose() for o in others], axis ^ 1, **kwargs
)
.transpose()
)
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
sort = kwargs.get("sort", None)
join = kwargs.get("join", "outer")
ignore_index = kwargs.get("ignore_index", False)
new_self, to_append, joined_axis = self.copartition(
axis ^ 1,
others,
join,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(axis, to_append)
if axis == 0:
# The indices will be appended to form the final index.
# If `ignore_index` is true, we create a RangeIndex that is the
# length of all of the index objects combined. This is the same
# behavior as pandas.
new_index = (
self.index.append([other.index for other in others])
if not ignore_index
else pandas.RangeIndex(
len(self.index) + sum(len(other.index) for other in others)
)
)
return self.__constructor__(new_data, new_index, joined_axis)
else:
# The columns will be appended to form the final columns.
new_columns = self.columns.append([other.columns for other in others])
return self.__constructor__(new_data, joined_axis, new_columns)
def _join_list_of_managers(self, others, **kwargs):
assert isinstance(
others, list
), "This method is for lists of QueryCompiler objects only"
assert all(
isinstance(other, type(self)) for other in others
), "Different Manager objects are being used. This is not allowed"
# Uses join's default value (though should not revert to default)
how = kwargs.get("how", "left")
sort = kwargs.get("sort", False)
lsuffix = kwargs.get("lsuffix", "")
rsuffix = kwargs.get("rsuffix", "")
new_self, to_join, joined_index = self.copartition(
0,
others,
how,
sort,
force_repartition=any(obj._is_transposed for obj in [self] + others),
)
new_data = new_self.concat(1, to_join)
# This stage is to efficiently get the resulting columns, including the
# suffixes.
if len(others) == 1:
others_proxy = pandas.DataFrame(columns=others[0].columns)
else:
others_proxy = [pandas.DataFrame(columns=other.columns) for other in others]
self_proxy = pandas.DataFrame(columns=self.columns)
new_columns = self_proxy.join(
others_proxy, lsuffix=lsuffix, rsuffix=rsuffix
).columns
return self.__constructor__(new_data, joined_index, new_columns)
# END Append/Concat/Join
# Copartition
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
"""Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
"""
if isinstance(other, type(self)):
other = [other]
index_obj = (
[o.index for o in other] if axis == 0 else [o.columns for o in other]
)
joined_index = self._join_index_objects(
axis ^ 1, index_obj, how_to_join, sort=sort
)
# We have to set these because otherwise when we perform the functions it may
# end up serializing this entire object.
left_old_idx = self.index if axis == 0 else self.columns
right_old_idxes = index_obj
# Start with this and we'll repartition the first time, and then not again.
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
"""Create a function based on the old index and axis.
Args:
old_idx: The old index/columns
Returns:
A function that will be run in each partition.
"""
def reindex_partition(df):
if axis == 0:
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
# If the indices are equal we can skip partitioning so long as we are not
# forced to repartition. See note above about `force_repartition`.
if i != 0 or (left_old_idx.equals(joined_index) and not force_repartition):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if right_old_idxes[i].equals(joined_index) and not force_repartition:
reindex_right = None
else:
reindex_right = compute_reindex(right_old_idxes[i])
reindexed_self, reindexed_other = reindexed_self.copartition_datasets(
axis,
other[i].data,
reindex_left,
reindex_right,
other[i]._is_transposed,
)
reindexed_other_list.append(reindexed_other)
return reindexed_self, reindexed_other_list, joined_index
# Data Management Methods
def free(self):
"""In the future, this will hopefully trigger a cleanup of this object.
"""
# TODO create a way to clean up this object.
return
# END Data Management Methods
# To/From Pandas
def to_pandas(self):
"""Converts Modin DataFrame to Pandas DataFrame.
Returns:
Pandas DataFrame of the QueryCompiler.
"""
df = self.data.to_pandas(is_transposed=self._is_transposed)
if df.empty:
if len(self.columns) != 0:
df = pandas.DataFrame(columns=self.columns).astype(self.dtypes)
else:
df = pandas.DataFrame(columns=self.columns, index=self.index)
else:
ErrorMessage.catch_bugs_and_request_email(
len(df.index) != len(self.index) or len(df.columns) != len(self.columns)
)
df.index = self.index
df.columns = self.columns
return df
@classmethod
def from_pandas(cls, df, block_partitions_cls):
"""Improve simple Pandas DataFrame to an advanced and superior Modin DataFrame.
Args:
cls: DataManger object to convert the DataFrame to.
df: Pandas DataFrame object.
block_partitions_cls: BlockParitions object to store partitions
Returns:
Returns QueryCompiler containing data from the Pandas DataFrame.
"""
new_index = df.index
new_columns = df.columns
new_dtypes = df.dtypes
new_data = block_partitions_cls.from_pandas(df)
return cls(new_data, new_index, new_columns, dtypes=new_dtypes)
# END To/From Pandas
# To NumPy
def to_numpy(self):
"""Converts Modin DataFrame to NumPy Array.
Returns:
NumPy Array of the QueryCompiler.
"""
arr = self.data.to_numpy(is_transposed=self._is_transposed)
ErrorMessage.catch_bugs_and_request_email(
len(arr) != len(self.index) or len(arr[0]) != len(self.columns)
)
return arr
# END To NumPy
# Inter-Data operations (e.g. add, sub)
# These operations require two DataFrames and will change the shape of the
# data if the index objects don't match. An outer join + op is performed,
# such that columns/rows that don't have an index on the other DataFrame
# result in NaN values.
def _inter_manager_operations(self, other, how_to_join, func):
"""Inter-data operations (e.g. add, sub).
Args:
other: The other Manager for the operation.
how_to_join: The type of join to join to make (e.g. right, outer).
Returns:
New QueryCompiler with new data and index.
"""
reindexed_self, reindexed_other_list, joined_index = self.copartition(
0, other, how_to_join, sort=False
)
# unwrap list returned by `copartition`.
reindexed_other = reindexed_other_list[0]
new_columns = self._join_index_objects(
0, other.columns, how_to_join, sort=False
)
# There is an interesting serialization anomaly that happens if we do
# not use the columns in `inter_data_op_builder` from here (e.g. if we
# pass them in). Passing them in can cause problems, so we will just
# use them from here.
self_cols = self.columns
other_cols = other.columns
def inter_data_op_builder(left, right, func):
left.columns = self_cols
right.columns = other_cols
# We reset here to make sure that the internal indexes match. We aligned
# them in the previous step, so this step is to prevent mismatches.
left.index = pandas.RangeIndex(len(left.index))
right.index = pandas.RangeIndex(len(right.index))
result = func(left, right)
result.columns = pandas.RangeIndex(len(result.columns))
return result
new_data = reindexed_self.inter_data_operation(
1, lambda l, r: inter_data_op_builder(l, r, func), reindexed_other
)
return self.__constructor__(new_data, joined_index, new_columns)
def _inter_df_op_handler(self, func, other, **kwargs):
"""Helper method for inter-manager and scalar operations.
Args:
func: The function to use on the Manager/scalar.
other: The other Manager/scalar.
Returns:
New QueryCompiler with new data and index.
"""
axis = kwargs.get("axis", 0)
axis = pandas.DataFrame()
# -*- coding: utf-8 -*-
import mando
try:
from mando.rst_text_formatter import RSTHelpFormatter as HelpFormatter
except ImportError:
from argparse import RawTextHelpFormatter as HelpFormatter
import pandas as pd
import typic
from tstoolbox import tsutils
from tsgettoolbox.ulmo.twc.kbdi.core import get_data
def twc_ulmo_df(county=None, start_date=None, end_date=None):
df = get_data(
county=county,
start=pd.to_datetime(start_date)
from sklearn.model_selection import StratifiedKFold
from spacekit.builder.architect import BuilderEnsemble
from spacekit.analyzer.compute import ComputeBinary
from spacekit.skopes.hst.svm.train import make_ensembles
from spacekit.generator.augment import training_data_aug, training_img_aug
from spacekit.preprocessor.transform import (
normalize_training_images,
)
from spacekit.preprocessor.transform import PowerX
from spacekit.extractor.load import load_datasets
import pandas as pd
import numpy as np
import os
HOME = os.path.abspath(os.curdir)
DATA = os.path.join(HOME, "data")
SVMDIR = os.path.join(DATA, "2021-07-28")
# fname = os.path.join(SVMDIR, "svm_combined_v3.csv")
fname = os.path.join(SVMDIR, "svm_combined_v4.csv")
# paths to labeled image directories
# img_path = os.path.join(SVMDIR, 'img')
img_path_npz = os.path.join(DATA, "training_img.npz")
img_path_png = os.path.join(SVMDIR, "img")
DIM = 3
CH = 3
WIDTH = 128
HEIGHT = 128
DEPTH = DIM * CH
SHAPE = (DIM, WIDTH, HEIGHT, CH)
TF_CPP_MIN_LOG_LEVEL = 2
params = dict(
batch_size=32,
epochs=200,
lr=1e-4,
decay=[100000, 0.96],
early_stopping=None,
verbose=2,
ensemble=True,
)
import datetime as dt
timestring = dt.datetime.now().isoformat()[:-7] # 2022-01-19T21:31:21
timestamp = int(dt.datetime.fromisoformat(timestring).timestamp())
dtstring = timestring.split("T")[0] + "-" + str(timestamp)
output_path = f"{DATA}/{dtstring}" # 2022-02-14-1644850390
os.makedirs(output_path, exist_ok=True)
img_size = 128
dim = 3
ch = 3
depth = dim * ch
cols = ["numexp", "rms_ra", "rms_dec", "nmatches", "point", "segment", "gaia"]
ncols = list(range(len(cols)))
model_name = "svm_ensemble"
df = load_datasets([fname])
from spacekit.extractor.load import SVMFileIO
((X_train, X_test, X_val), (y_train, y_test, y_val)), (train, test, val) = SVMFileIO(
img_path_npz, w=img_size, h=img_size, d=dim * ch, inference=False, data=df, v=0.85
).load()
X_train = pd.concat([X_train, X_val], axis=0)
y_train = pd.concat([y_train, y_val], axis=0)
import decimal
import numpy as np
from numpy import iinfo
import pytest
import pandas as pd
from pandas import to_numeric
from pandas.util import testing as tm
class TestToNumeric(object):
def test_empty(self):
# see gh-16302
s = pd.Series([], dtype=object)
res = to_numeric(s)
expected = pd.Series([], dtype=np.int64)
tm.assert_series_equal(res, expected)
# Original issue example
res = to_numeric(s, errors='coerce', downcast='integer')
expected = pd.Series([], dtype=np.int8)
tm.assert_series_equal(res, expected)
def test_series(self):
s = pd.Series(['1', '-3.14', '7'])
res = to_numeric(s)
expected = pd.Series([1, -3.14, 7])
tm.assert_series_equal(res, expected)
s = pd.Series(['1', '-3.14', 7])
res = to_numeric(s)
tm.assert_series_equal(res, expected)
def test_series_numeric(self):
s = pd.Series([1, 3, 4, 5], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
s = pd.Series([1., 3., 4., 5.], index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
# bool is regarded as numeric
s = pd.Series([True, False, True, True],
index=list('ABCD'), name='XXX')
res = to_numeric(s)
tm.assert_series_equal(res, s)
def test_error(self):
s = pd.Series([1, -3.14, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([1, -3.14, 'apple'])
tm.assert_series_equal(res, expected)
res = to_numeric(s, errors='coerce')
expected = pd.Series([1, -3.14, np.nan])
tm.assert_series_equal(res, expected)
s = pd.Series(['orange', 1, -3.14, 'apple'])
msg = 'Unable to parse string "orange" at position 0'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
def test_error_seen_bool(self):
s = pd.Series([True, False, 'apple'])
msg = 'Unable to parse string "apple" at position 2'
with pytest.raises(ValueError, match=msg):
to_numeric(s, errors='raise')
res = to_numeric(s, errors='ignore')
expected = pd.Series([True, False, 'apple'])
tm.assert_series_equal(res, expected)
# coerces to float
res = to_numeric(s, errors='coerce')
expected = pd.Series([1., 0., np.nan])
tm.assert_series_equal(res, expected)
def test_list(self):
s = ['1', '-3.14', '7']
res = to_numeric(s)
expected = np.array([1, -3.14, 7])
tm.assert_numpy_array_equal(res, expected)
def test_list_numeric(self):
s = [1, 3, 4, 5]
res = to_numeric(s)
# -*- coding: utf-8 -*-
"""
Created on Tue Mar 31 17:21:26 2020
@author: <NAME>
"""
# -*- coding: utf-8 -*-
"""
Created on Wed Nov 21 21:23:55 2018
@author: <NAME>
"""
import os
import itertools
from operator import itemgetter
import pandas as pd
import numpy as np
from support_modules import nn_support as nsup
from support_modules import role_discovery as rl
from support_modules.readers import log_reader as lr
def calculate_resource_dedication(one_timestamp, log, df_resources):
"""Main method of intercase features calculations.
Args:
args (dict): parameters for training the network.
"""
# Event indexing
log['event_id'] = log.index
log = calculate_event_duration(log, one_timestamp)
# splitt events in start and complete
splitted_events = splitt_events(log, one_timestamp)
# Matching events with slices
ranges = match_slices(splitted_events)
# Reshaping of ranges
simplified = dict()
for i in range(0, len(ranges)):
simplified[i] = list(ranges[i]['events'])
ranges_df = pandas.DataFrame.from_dict(ranges, orient='index')
#!/usr/bin/env python
# coding: utf-8
# # UCI
# # Drug Review Dataset
# In[ ]:
import pandas as pd
# In[2]:
data_train = pd.read_csv('.....\\drugsCom_raw\\drugsComTrain_raw.tsv',delimiter='\t')
data_test = pd.read_csv('......\\drugsCom_raw\\drugsComTest_raw.tsv' ,delimiter='\t')
# In[ ]:
# In[3]:
df = pd.concat([data_train,data_test]) # combine the two dataFrames into one for a bigger data size and ease of preprocessing
# In[4]:
data_train.shape
# In[5]:
data_test.shape
# In[6]:
df.head()
# In[7]:
df.columns = ['Id','drugName','condition','review','rating','date','usefulCount'] #rename columns
# In[8]:
df.head()
# In[9]:
df['date'] = pd.to_datetime(df['date']) # convert date to datetime even though we are not using date in this analysis
# In[10]:
df['date'].head() #confirm conversion
# In[11]:
df2 = df[['Id','review','rating']].copy() # create a new dataframe with just review and rating for sentiment analysis
# In[12]:
df.head() #confirm conversion
# In[13]:
df2.head()
# In[14]:
df2.isnull().any().any() # check for null
# In[15]:
df2.info(null_counts=True) #another way to check for null
# In[16]:
df2.info() #check for datatype, also shows null
# In[17]:
df2['Id'].unique() # shows unique Id as array
# In[18]:
df2['Id'].count() #count total number of items in the Id column
# In[19]:
df2['Id'].nunique() #shows unique Id values
# In[20]:
df['review'][1] # access indivdual value
# In[21]:
df.review[1] # another method to assess individual value in a Series
# In[22]:
import nltk
nltk.download(['punkt','stopwords'])
# In[23]:
from nltk.corpus import stopwords
stopwords = stopwords.words('english')
# In[24]:
df2['cleanReview'] = df2['review'].apply(lambda x: ' '.join([item for item in x.split() if item not in stopwords])) # remove stopwords from review
# In[56]:
import vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()
# In[57]:
df2['vaderReviewScore'] = df2['cleanReview'].apply(lambda x: analyzer.polarity_scores(x)['compound'])
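# In[ ]:
# Quick illustrative check of what VADER returns for one made-up review before
# scoring the whole column; the exact numbers will vary.
sample_review = "This medication worked wonders for me, no side effects at all!"
print(analyzer.polarity_scores(sample_review)) # dict with 'neg', 'neu', 'pos', 'compound'; compound >= 0.05 is treated as positive below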
# In[59]:
positive_num = len(df2[df2['vaderReviewScore'] >=0.05])
neutral_num = len(df2[(df2['vaderReviewScore'] >-0.05) & (df2['vaderReviewScore']<0.05)])
negative_num = len(df2[df2['vaderReviewScore']<=-0.05])
# In[60]:
positive_num,neutral_num, negative_num
# In[61]:
df2['vaderSentiment']= df2['vaderReviewScore'].map(lambda x:int(2) if x>=0.05 else int(1) if x<=-0.05 else int(0) )
# In[62]:
df2['vaderSentiment'].value_counts()
# In[63]:
Total_vaderSentiment = positive_num + neutral_num + negative_num
Total_vaderSentiment
# In[64]:
df2.loc[df2['vaderReviewScore'] >=0.05,"vaderSentimentLabel"] ="positive"
df2.loc[(df2['vaderReviewScore'] >-0.05) & (df2['vaderReviewScore']<0.05),"vaderSentimentLabel"]= "neutral"
df2.loc[df2['vaderReviewScore']<=-0.05,"vaderSentimentLabel"] = "negative"
# In[65]:
df2.shape
# In[66]:
positive_rating = len(df2[df2['rating'] >=7.0])
neutral_rating = len(df2[(df2['rating'] >=4) & (df2['rating']<7)])
negative_rating = len(df2[df2['rating']<=3])
# In[67]:
positive_rating,neutral_rating,negative_rating
# In[68]:
Total_rating = positive_rating+neutral_rating+negative_rating
Total_rating
# In[69]:
df2['ratingSentiment']= df2['rating'].map(lambda x:int(2) if x>=7 else int(1) if x<=3 else int(0) )
# In[70]:
df2['ratingSentiment'].value_counts()
# In[72]:
df2.loc[df2['rating'] >=7.0,"ratingSentimentLabel"] ="positive"
df2.loc[(df2['rating'] >=4.0) & (df2['rating']<7.0),"ratingSentimentLabel"]= "neutral"
df2.loc[df2['rating']<=3.0,"ratingSentimentLabel"] = "negative"
# In[98]:
df2 = df2[['Id','review','cleanReview','rating','ratingSentiment','ratingSentimentLabel','vaderReviewScore','vaderSentimentLabel','vaderSentiment']]
# # =============================
# In[104]:
data_df=df2.drop(['review','cleanReview'],axis=1)
# In[149]:
data_df.head()
# In[150]:
data_df.info()
# In[145]:
#data_df=df2.drop(['ratingSentimentLabel'],axis=1)
# In[169]:
from sklearn.preprocessing import LabelEncoder
# In[188]:
encoder = LabelEncoder()
data_cat = data_df["review"]
data_cat_encod = encoder.fit_transform(data_cat)
data_cat_encod = pd.DataFrame(data_cat_encod,columns=["review"])
data_cat_encod.head()
# In[152]:
encoder = LabelEncoder()
data_cat = data_df["cleanReview"]
data_cat_encod = encoder.fit_transform(data_cat)
data_cat_encod = pd.DataFrame(data_cat_encod,columns=["cleanReview"])
data_cat_encod.head()
# In[153]:
encoder = LabelEncoder()
data_cat = data_df["vaderSentimentLabel"]
data_cat_encod = encoder.fit_transform(data_cat)
data_cat_encod = pd.DataFrame(data_cat_encod,columns=["vaderSentimentLabel"])
# -*- coding: utf-8 -*-
"""Seaborn_and_Linear_Regression_(start).ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1KYXQrn37CzHVYaFYYBlRSEJobv0x113q
# Introduction
Do higher film budgets lead to more box office revenue? Let's find out if there's a relationship using the movie budgets and financial performance data that I've scraped from [the-numbers.com](https://www.the-numbers.com/movie/budgets) on **May 1st, 2018**.
<img src=https://i.imgur.com/kq7hrEh.png>
# Import Statements
"""
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
from sklearn.linear_model import LinearRegression
"""# Notebook Presentation"""
pd.options.display.float_format = '{:,.2f}'.format
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
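"""# Quick Regression Sketch (illustrative)"""

# A minimal budget-vs-revenue fit on a few invented numbers, just to show the
# shape of the analysis described above; the real notebook uses the scraped
# the-numbers.com data, and these column names are assumptions.
demo = pd.DataFrame({
    'USD_Production_Budget': [10_000_000, 50_000_000, 100_000_000, 200_000_000],
    'USD_Worldwide_Gross': [30_000_000, 90_000_000, 210_000_000, 450_000_000],
})
regression = LinearRegression()
regression.fit(demo[['USD_Production_Budget']], demo['USD_Worldwide_Gross'])
print('slope:', regression.coef_[0], 'intercept:', regression.intercept_)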
import os
from typing import Any, Optional
import gspread
import pandas as pd
from oauth2client.service_account import ServiceAccountCredentials
import socket
from datetime import datetime
from time import sleep
import traceback
_current_experiment = None # type: Optional[ExperimentParams]
def _check_experiment():
if _current_experiment is None:
raise Exception("No experiment is running!")
def log_experiment_id(experiment_id: str):
"""
Sets experiment id
:param experiment_id:
:return:
"""
_check_experiment()
_current_experiment.log_experiment_id(experiment_id)
def log_status(status: str):
"""
Sets status of the current experiment
:param status:
"""
_check_experiment()
_current_experiment.log_status(status)
def log_comment(text: str):
"""
Sets comment of the current experiment
"""
_check_experiment()
_current_experiment.log_comment(text)
def log_progress(current: int, max_value: int):
"""
Sets progress of the current experiment
"""
_check_experiment()
_current_experiment.log_progress(current, max_value)
def log_metric(metric: str, value: Any):
"""
Logs metric of the current experiment
:param metric: metric name
:param value: metric value
"""
_check_experiment()
_current_experiment.log_metric(metric, value)
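# Intended call pattern for the logging helpers above, assuming an experiment
# has already been made current (that wiring lives elsewhere in this module);
# the metric name is hypothetical.
def _example_log_calls():
    log_status('running')
    log_progress(1, 100)
    log_metric('val_accuracy', 0.93) # hypothetical metric column
    log_comment('first epoch finished')
    log_status('done')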
class ParamsException(Exception):
pass
class ParamsSheet(object):
client_credentials = None
params_sheet_id = None
def __init__(self, parser, writable_column_types=None, experiment_id_column='experiment_id', server_name=None):
"""
ParamsSheet object represents state of google spreadsheet with parameters and all methods to access it.
:param parser: Argparse parser object with no positional arguments
:param writable_column_types: dict of names of model metric columns and their types
:param experiment_id_column: str name of experiment id column
"""
assert isinstance(writable_column_types, dict), "writable_column_types has to be a dict"
self.parser = parser
self.experiment_id_column = experiment_id_column
self.writable_column_types = writable_column_types
self.column_types = writable_column_types.copy()
self.update_type_from_parser()
self.defaults = self.get_defaults_from_parser()
if server_name is None:
self.server = os.environ.get('SERVERNAME', None)
if self.server is None:
self.server = socket.gethostname()
else:
self.server = server_name + "_" + socket.gethostname()
self._generate_credentials()
def _generate_credentials(self):
if isinstance(self.client_credentials, dict):
credentials = ServiceAccountCredentials._from_parsed_json_keyfile(self.client_credentials,
['https://spreadsheets.google.com/feeds'])
else:
credentials = ServiceAccountCredentials.from_json_keyfile_name(self.client_credentials,
['https://spreadsheets.google.com/feeds'])
gc = gspread.authorize(credentials)
self.sheet = gc.open_by_key(self.params_sheet_id).sheet1
first_cell = self.sheet.cell(1, 1).value
try:
self.column_row_id = int(first_cell)
except ValueError:
self.column_row_id = 1
self.columns = self.sheet.row_values(self.column_row_id)
def get_params_for_training(self):
full_params = self.get_table()
params_to_process = full_params[
(full_params.status == "") & (full_params.server == "")]
params_for_this_server = full_params[
(full_params.status == "") & (full_params.server == self.server)]
if len(params_for_this_server):
params_to_process = params_for_this_server
if len(params_to_process) == 0:
raise ParamsException("No params to process!")
return ExperimentParams(
params_to_process.index[0], dict(params_to_process.iloc[0]), self)
def get_table(self):
recs = self.sheet.get_all_records(head=self.column_row_id, default_blank="")
return pd.DataFrame(recs)
import mysql.connector as conn
import pandas as pd
def remove_address(roll,password):
try:
cnx=conn.connect(user='root',password='<PASSWORD>',host='127.0.0.1',database='library')
cur_address=cnx.cursor()
cur_address.execute("select add_id,address from address where roll=%s",(roll,))
temp=cur_address.fetchall()
print("the total addresses you have register are......")
frame=pd.DataFrame(temp,columns=['address id','address'])
print(frame)
addid=input("Enter address id which you want to remove:")
cur_address.execute("delete from address where add_id=%s",(addid,))
cnx.commit()
print("address removed successfully...?")
cnx.close()
except:
pass
def remove_phone(roll,password):
try:
cnx=conn.connect(user='root',password='<PASSWORD>',host='127.0.0.1',database='library')
cur_phone=cnx.cursor()
cur_phone.execute("select phone_id,phone from phone where roll=%s",(roll,))
temp=cur_phone.fetchall()
print("the total contacts you have register are......")
frame=pd.DataFrame(temp,columns=['phone id','phone number'])
print(frame)
phoneid=input("Enter phone_id which you want to remove:")
cur_phone.execute("delete from phone where phone_id=%s",(phoneid,))
cnx.commit()
print("phone number removed successfully...?")
cnx.close()
except:
pass
def remove_book(roll,password):
try:
cnx=conn.connect(user='root',password='<PASSWORD>',host='127.0.0.1',database='library')
cur_book=cnx.cursor()
cur_borrow=cnx.cursor()
cur_book_bank=cnx.cursor()
cur_student=cnx.cursor()
cur_book.execute("select book_id,book_name from book")
temp=cur_book.fetchall()
frame=pd.DataFrame(temp,columns=['BOOK-ID','BOOK-NAME'])
print("total books we have......")
print(frame)
bookid=int(input("Enter the book-id for which book you want to remove:"))
try:
cur_borrow.execute("select roll from borrow where book_id=%s",(bookid,))
rolls=[]
stu=[]
for i in cur_borrow:
rolls.append(i[0])
if len(rolls)==0:
raise ValueError()
print("You cannot remove this book.......\nsome peoples have borrowed this book")
print("the peoples are.........")
for i in rolls:
cur_student.execute("select roll,f_name,l_name from student where roll=%s",(i,))
temp=cur_student.fetchone()
stu.append(temp)
frame2=pd.DataFrame(stu,columns=['ROLL','FNAME','LNAME'])
print(frame2)
cnx.close()
except:
cur_book_bank.execute("delete from book_bank where book_id=%s",(bookid,))
cnx.commit()
cur_book.execute("delete from book where book_id=%s",(bookid,))
cnx.commit()
print("book removed successfully from the record.....")
cnx.close()
except:
pass
def remove_student(roll,password):
try:
cnx=conn.connect(user='root',password='<PASSWORD>',host='127.0.0.1',database='library')
cur_book=cnx.cursor()
cur_borrow=cnx.cursor()
cur_phone=cnx.cursor()
cur_address=cnx.cursor()
cur_course_batch_stu=cnx.cursor()
cur_login=cnx.cursor()
cur_student=cnx.cursor()
cur_student.execute("select roll,f_name,l_name from student")
temp=cur_student.fetchall()
frame=pd.DataFrame(temp,columns=['ROLL','F-NAME','L-NAME'])
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import json
import pandas as pd
from datetimewidget.widgets import DateTimeWidget
from django import forms
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from dataops import pandas_db, ops
from ontask import ontask_prefs, is_legal_name
from ontask.forms import RestrictedFileField, dateTimeOptions
from .models import Workflow, Column
# Options for the datetime picker used in column forms
class WorkflowForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
self.user = kwargs.pop('workflow_user', None)
super(WorkflowForm, self).__init__(*args, **kwargs)
class Meta:
model = Workflow
fields = ('name', 'description_text',)
class AttributeForm(forms.Form):
def __init__(self, *args, **kwargs):
self.form_fields = kwargs.pop('form_fields')
super(AttributeForm, self).__init__(*args, **kwargs)
# Create the set of fields
for key, val_field, val in self.form_fields:
# Field for the key
self.fields[key] = forms.CharField(
max_length=1024,
initial=key,
strip=True,
label='')
# Field for the value
self.fields[val_field] = forms.CharField(
max_length=1024,
initial=val,
label='')
def clean(self):
data = super(AttributeForm, self).clean()
new_keys = [data[x] for x, _, _ in self.form_fields]
# Check that there were not duplicate keys given
if len(set(new_keys)) != len(new_keys):
raise forms.ValidationError(
'Repeated names are not allowed'
)
return data
class AttributeItemForm(forms.Form):
# Key field
key = forms.CharField(max_length=1024,
strip=True,
required=True,
label='Name')
# Field for the value
value = forms.CharField(max_length=1024,
label='Value')
def __init__(self, *args, **kwargs):
self.keys = kwargs.pop('keys')
key = kwargs.pop('key', '')
value = kwargs.pop('value', '')
super(AttributeItemForm, self).__init__(*args, **kwargs)
self.fields['key'].initial = key
self.fields['value'].initial = value
def clean(self):
data = super(AttributeItemForm, self).clean()
# Name is legal
msg = is_legal_name(data['key'])
if msg:
self.add_error('key', msg)
return data
if data['key'] in self.keys:
self.add_error(
'key',
'Name has to be different from all existing ones.')
return data
return data
class ColumnBasicForm(forms.ModelForm):
# Raw text for the categories
raw_categories = forms.CharField(
strip=True,
required=False,
label='Comma separated list of allowed values')
def __init__(self, *args, **kwargs):
self.workflow = kwargs.pop('workflow', None)
self.data_frame = None
super(ColumnBasicForm, self).__init__(*args, **kwargs)
self.fields['raw_categories'].initial = \
', '.join([str(x) for x in self.instance.get_categories()])
def clean(self):
data = super(ColumnBasicForm, self).clean()
# Load the data frame from the DB for various checks and leave it in
# the form for future use
self.data_frame = pandas_db.load_from_db(self.workflow.id)
# Column name must be a legal variable name
if 'name' in self.changed_data:
# Name is legal
msg = is_legal_name(data['name'])
if msg:
self.add_error('name', msg)
return data
# Check that the name is not present already
if next((c for c in self.workflow.columns.all()
if c.id != self.instance.id and
c.name == data['name']), None):
# New column name collides with existing one
self.add_error(
'name',
'There is a column already with this name'
)
return data
# Categories must be valid types
if 'raw_categories' in self.changed_data:
if data['raw_categories']:
# Condition 1: Values must be valid for the type of the column
category_values = [x.strip()
for x in data['raw_categories'].split(',')]
try:
valid_values = Column.validate_column_values(
data['data_type'],
category_values)
except ValueError:
self.add_error(
'raw_categories',
'Incorrect list of values'
)
return data
# Condition 2: The values in the dataframe column must be in
# these categories (only if the column is being edited, though
if self.instance.name and \
not all([x in valid_values
for x in self.data_frame[self.instance.name]
if x and not pd.isnull(x)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jan 11 18:29:39 2021
@author: Clement
"""
import pandas
import numpy
import os
import sys
import geopandas as gpd
import tqdm
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
from gen_fct import df_fct
from gen_fct import file_fct
class WorldDataSet:
def __init__ (self):
self.df_cases = pandas.DataFrame()
self.df_death = pandas.DataFrame()
        self.df_fra = pandas.DataFrame()
import math
from collections import OrderedDict
from datetime import datetime
import pytest
from rpy2 import rinterface
from rpy2 import robjects
from rpy2.robjects import vectors
from rpy2.robjects import conversion
class MockNamespace(object):
def __getattr__(self, name):
return None
has_pandas = False
try:
import pandas
has_pandas = True
except:
pandas = MockNamespace()
has_numpy = False
try:
import numpy
has_numpy = True
except:
numpy = MockNamespace()
if has_pandas:
import rpy2.robjects.pandas2ri as rpyp
from rpy2.robjects import default_converter
from rpy2.robjects.conversion import localconverter
@pytest.mark.skipif(not has_pandas, reason='Package pandas is not installed.')
class TestPandasConversions(object):
def testActivate(self):
        # FIXME: does the following still make sense?
assert rpyp.py2rpy != robjects.conversion.py2rpy
l = len(robjects.conversion.py2rpy.registry)
k = set(robjects.conversion.py2rpy.registry.keys())
rpyp.activate()
assert len(conversion.py2rpy.registry) > l
rpyp.deactivate()
assert len(conversion.py2rpy.registry) == l
assert set(conversion.py2rpy.registry.keys()) == k
def testActivateTwice(self):
        # FIXME: does the following still make sense?
assert rpyp.py2rpy != robjects.conversion.py2rpy
l = len(robjects.conversion.py2rpy.registry)
k = set(robjects.conversion.py2rpy.registry.keys())
rpyp.activate()
rpyp.deactivate()
rpyp.activate()
assert len(conversion.py2rpy.registry) > l
rpyp.deactivate()
assert len(conversion.py2rpy.registry) == l
assert set(conversion.py2rpy.registry.keys()) == k
def test_dataframe(self):
# Content for test data frame
l = (
('b', numpy.array([True, False, True], dtype=numpy.bool_)),
('i', numpy.array([1, 2, 3], dtype='i')),
('f', numpy.array([1, 2, 3], dtype='f')),
# ('s', numpy.array([b'b', b'c', b'd'], dtype='S1')),
('u', numpy.array([u'a', u'b', u'c'], dtype='U')),
('dates', [datetime(2012, 5, 2),
datetime(2012, 6, 3),
datetime(2012, 7, 1)])
)
od = OrderedDict(l)
# Pandas data frame
pd_df = pandas.core.frame.DataFrame(od)
# Convert to R
with localconverter(default_converter + rpyp.converter) as cv:
rp_df = robjects.conversion.py2rpy(pd_df)
assert pd_df.shape[0] == rp_df.nrow
assert pd_df.shape[1] == rp_df.ncol
# assert tuple(rp_df.rx2('s')) == (b'b', b'c', b'd')
assert tuple(rp_df.rx2('u')) == ('a', 'b', 'c')
def test_dataframe_columnnames(self):
pd_df = pandas.DataFrame({'the one': [1, 2], 'the other': [3, 4]})
# Convert to R
with localconverter(default_converter + rpyp.converter) as cv:
rp_df = robjects.conversion.py2rpy(pd_df)
assert tuple(rp_df.names) == ('the one', 'the other')
def test_series(self):
Series = pandas.core.series.Series
s = Series(numpy.random.randn(5), index=['a', 'b', 'c', 'd', 'e'])
with localconverter(default_converter + rpyp.converter) as cv:
rp_s = robjects.conversion.py2rpy(s)
assert isinstance(rp_s, rinterface.FloatSexpVector)
@pytest.mark.parametrize('dtype',
('i',
numpy.int32 if has_pandas else None,
numpy.int8 if has_pandas else None,
numpy.int16 if has_pandas else None,
numpy.int32 if has_pandas else None,
numpy.int64 if has_pandas else None,
numpy.uint8 if has_pandas else None,
numpy.uint16 if has_pandas else None,
pandas.Int32Dtype if has_pandas else None,
pandas.Int64Dtype if has_pandas else None))
def test_series_int(self, dtype):
Series = pandas.core.series.Series
s = Series(range(5),
index=['a', 'b', 'c', 'd', 'e'],
dtype=dtype)
with localconverter(default_converter + rpyp.converter) as cv:
rp_s = robjects.conversion.py2rpy(s)
assert isinstance(rp_s, rinterface.IntSexpVector)
@pytest.mark.parametrize('dtype',
(pandas.Int32Dtype() if has_pandas else None,
                              pandas.Int64Dtype() if has_pandas else None))
from abc import abstractmethod
from analizer.abstract.expression import Expression
from enum import Enum
from storage.storageManager import jsonMode
from analizer.typechecker.Metadata import Struct
from analizer.typechecker import Checker
import pandas as pd
from analizer.symbol.symbol import Symbol
from analizer.symbol.environment import Environment
class SELECT_MODE(Enum):
ALL = 1
PARAMS = 2
# load the data
Struct.load()
# variable that holds the database currently in use
dbtemp = ""
class Instruction:
"""
    This class represents an instruction
"""
def __init__(self, row, column) -> None:
self.row = row
self.column = column
@abstractmethod
def execute(self, environment):
"""
        Method used to execute the expressions
"""
class SelectOnlyParams(Instruction):
def __init__(self, params, row, column):
Instruction.__init__(self, row, column)
self.params = params
def execute(self, environment):
value = [p.execute(environment).value for p in self.params]
labels = [p.temp for p in self.params]
return labels, value
class SelectParams(Instruction):
def __init__(self, mode, params, row, column):
Instruction.__init__(self, row, column)
self.mode = mode
self.params = params
def execute(self, environment):
pass
class WhereClause(Instruction):
def __init__(self, series, row, column):
super().__init__(row, column)
self.series = series
def execute(self, environment):
filt = self.series.execute(environment)
return environment.dataFrame.loc[filt.value]
class Select(Instruction):
def __init__(self, params, fromcl, wherecl, row, column):
Instruction.__init__(self, row, column)
self.params = params
self.wherecl = wherecl
self.fromcl = fromcl
def execute(self, environment):
newEnv = Environment(environment, dbtemp)
self.fromcl.execute(newEnv)
value = [p.execute(newEnv).value for p in self.params]
labels = [p.temp for p in self.params]
for i in range(len(labels)):
newEnv.dataFrame[labels[i]] = value[i]
if self.wherecl == None:
return newEnv.dataFrame.filter(labels)
wh = self.wherecl.execute(newEnv)
w2 = wh.filter(labels)
return w2
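# Illustrative aside (not part of the grammar): the pandas operations that
# Select.execute composes. The frame and predicate below are invented; the
# point is that the WHERE clause yields a boolean Series used with .loc, and
# the selected expressions end up as labelled columns kept with .filter().
def _select_sketch():
    df = pd.DataFrame({"id": [1, 2, 3], "price": [10.0, 25.0, 7.5]})
    labels = ["id", "price"]            # SELECT id, price
    mask = df["price"] > 9              # WHERE price > 9
    return df.loc[mask].filter(labels)  # filter rows, then project columns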
class Drop(Instruction):
"""
    Class representing the DROP TABLE and DROP DATABASE instructions
    This instruction removes a table or a database from the DBMS
"""
def __init__(self, structure, name, exists):
self.structure = structure
self.name = name
self.exists = exists
def execute(self, environment):
if self.structure == "TABLE":
if dbtemp != "":
valor = jsonMode.dropTable(dbtemp, self.name)
if valor == 2:
return "La base de datos no existe"
if valor == 3:
return "La tabla no existe en la base de datos"
if valor == 1:
return "Hubo un problema en la ejecucion de la sentencia"
if valor == 0:
Struct.dropTable(dbtemp, self.name)
return "Instruccion ejecutada con exito DROP TABLE"
return "El nombre de la base de datos no esta especificado operacion no realizada"
else:
valor = jsonMode.dropDatabase(self.name)
if valor == 1:
return "Hubo un problema en la ejecucion de la sentencia"
if valor == 2:
return "La base de datos no existe"
if valor == 0:
Struct.dropDatabase(self.name)
return "Instruccion ejecutada con exito DROP DATABASE"
return "Fatal Error: DropTable"
class AlterDataBase(Instruction):
def __init__(self, option, name, newname):
        self.option = option  # whether the database is renamed or its owner is changed
        self.name = name  # name of the database to modify
        self.newname = newname  # new name for the database, or the new owner
def execute(self, environment):
if self.option == "RENAME":
valor = jsonMode.alterDatabase(self.name, self.newname)
if valor == 2:
return "La base de datos no existe"
if valor == 3:
return "El nuevo nombre para la base de datos existe"
if valor == 1:
return "Hubo un problema en la ejecucion de la sentencia"
if valor == 0:
Struct.alterDatabaseRename(self.name, self.newname)
return "Instruccion ejecutada con exito ALTER DATABASE RENAME"
return "Error ALTER DATABASE RENAME"
elif self.option == "OWNER":
valor = Struct.alterDatabaseOwner(self.name, self.newname)
if valor == 0:
return "Instruccion ejecutada con exito ALTER DATABASE OWNER"
return "Error ALTER DATABASE OWNER"
return "Fatal Error ALTER DATABASE"
class Truncate(Instruction):
def __init__(self, name):
self.name = name
def execute(self, environment):
valor = jsonMode.truncate(dbtemp, self.name)
if valor == 2:
return "La base de datos no existe"
if valor == 3:
return "El nombre de la tabla no existe"
if valor == 1:
return "Hubo un problema en la ejecucion de la sentencia"
if valor == 0:
return "Instruccion ejecutada con exito"
class InsertInto(Instruction):
def __init__(self, tabla, columns, parametros):
self.tabla = tabla
self.parametros = parametros
self.columns = columns
def execute(self, environment):
lista = []
params = []
tab = self.tabla
for p in self.parametros:
params.append(p.execute(environment))
result = Checker.checkInsert(dbtemp, self.tabla, self.columns, params)
if result[0] == None:
for p in result[1]:
if p == None:
lista.append(p)
else:
lista.append(p.value)
res = jsonMode.insert(dbtemp, tab, lista)
if res == 2:
return "No existe la base de datos"
elif res == 3:
return "No existe la tabla"
elif res == 5:
return "Columnas fuera de los limites"
elif res == 4:
return "Llaves primarias duplicadas"
elif res == 1:
return "Error en la operacion"
elif res == 0:
return "Fila Insertada correctamente"
else:
return result[0]
class useDataBase(Instruction):
def __init__(self, db):
self.db = db
def execute(self, environment):
global dbtemp
# environment.database = self.db
dbtemp = self.db
class showDataBases(Instruction):
def __init__(self, like):
if like != None:
self.like = like[1 : len(like) - 1]
else:
self.like = None
def execute(self, environment):
lista = []
if self.like != None:
for l in jsonMode.showDatabases():
if self.like in l:
lista.append(l)
else:
lista = jsonMode.showDatabases()
if len(lista) == 0:
print("No hay bases de datos")
else:
return lista
class CreateDatabase(Instruction):
"""
    Class representing the CREATE DATABASE instruction
    This instruction creates a new database in the DBMS
"""
def __init__(self, replace, exists, name, owner, mode):
self.exists = exists
self.name = name
self.mode = mode
self.owner = owner
self.replace = replace
def execute(self, environment):
result = jsonMode.createDatabase(self.name)
"""
0: insert
1: error
2: exists
"""
if self.mode == None:
self.mode = 1
if result == 0:
Struct.createDatabase(self.name, self.mode, self.owner)
report = "Base de datos insertada"
elif result == 1:
report = "Error al insertar la base de datos"
elif result == 2 and self.replace:
Struct.replaceDatabase(self.name, self.mode, self.owner)
report = "Base de datos reemplazada"
elif result == 2 and self.exists:
report = "Base de datos no insertada, la base de datos ya existe"
else:
report = "Error: La base de datos ya existe"
return report
class CreateTable(Instruction):
def __init__(self, exists, name, inherits, columns=[]):
self.exists = exists
self.name = name
self.columns = columns
self.inherits = inherits
def execute(self, environment):
nCol = self.count()
result = jsonMode.createTable(dbtemp, self.name, nCol)
"""
Result
0: insert
1: error
2: not found database
3: exists table
"""
if result == 0:
insert = Struct.insertTable(dbtemp, self.name, self.columns, self.inherits)
if insert == None:
report = "Tabla " + self.name + " creada"
else:
jsonMode.dropTable(dbtemp, self.name)
Struct.dropTable(dbtemp, self.name)
report = insert
elif result == 1:
report = "Error: No se puedo crear la tabla: " + self.name
elif result == 2:
report = "Error: Base de datos no encontrada: " + dbtemp
elif result == 3 and self.exists:
report = "Tabla no creada, ya existe en la base de datos"
else:
report = "Error: ya existe la tabla " + self.name
return report
def count(self):
n = 0
for column in self.columns:
if not column[0]:
n += 1
return n
class CreateType(Instruction):
def __init__(self, exists, name, values=[]):
self.exists = exists
self.name = name
self.values = values
def execute(self, environment):
lista = []
for value in self.values:
lista.append(value.execute(environment).value)
result = Struct.createType(self.exists, self.name, lista)
if result == None:
report = "Type creado"
else:
report = result
return report
# TODO: Operacion Check
class CheckOperation(Instruction):
"""
    Class in charge of the CHECK instruction; it stores the condition
    to be evaluated by the CHECK
"""
def __init__(self, exp1, exp2, operator, row, column):
Instruction.__init__(self, row, column)
self.exp1 = exp1
self.exp2 = exp2
self.operator = operator
def execute(self, environment, value1, value2, type_):
exp1 = self.exp1.execute(environment)
exp2 = self.exp2.execute(environment)
operator = self.operator
if exp1.type == "ID" and exp2.type != "ID":
value2 = exp2.value
elif exp1.type != "ID" and exp2.type == "ID":
value1 = exp1.value
elif exp1.type == "ID" and exp2.type == "ID":
pass
else:
print("Error en el CHECK")
return None
if type_ == "MONEY":
value1 = str(value1)
value2 = str(value2)
try:
comps = {
"<": value1 < value2,
">": value1 > value2,
">=": value1 >= value2,
"<=": value1 <= value2,
"=": value1 == value2,
"!=": value1 != value2,
"<>": value1 != value2,
"ISDISTINCTFROM": value1 != value2,
"ISNOTDISTINCTFROM": value1 == value2,
}
value = comps.get(operator, None)
if value == None:
return Expression.ErrorBinaryOperation(
exp1.value, exp2.value, self.row, self.column
)
return value
except:
print("Error fatal CHECK")
# ---------------------------- FROM ---------------------------------
class FromClause(Instruction):
"""
    Class in charge of the FROM clause, used to retrieve the data
"""
def __init__(self, tables, aliases, row, column):
Instruction.__init__(self, row, column)
self.tables = tables
self.aliases = aliases
def crossJoin(self, tables):
if len(tables) <= 1:
return tables[0]
for t in tables:
t["____tempCol"] = 1
new_df = tables[0]
i = 1
while i < len(tables):
new_df = pd.merge(new_df, tables[i], on=["____tempCol"])
i += 1
new_df = new_df.drop("____tempCol", axis=1)
return new_df
def execute(self, environment):
tempDf = None
for i in range(len(self.tables)):
data = self.tables[i].execute(environment)
print("---------------------------------------")
print(self.aliases[i])
if isinstance(self.tables[i], Select):
newNames = {}
subqAlias = self.aliases[i]
                for columnName in data.columns:  # iterate column names (iteritems() would yield (name, Series) pairs)
colSplit = columnName.split(".")
if len(colSplit) >= 2:
newNames[columnName] = subqAlias + "." + colSplit[1]
else:
newNames[columnName] = subqAlias + "." + colSplit[0]
data.rename(columns=newNames, inplace=True)
environment.addVar(subqAlias, subqAlias, "TABLE", self.row, self.column)
else:
sym = Symbol(
self.tables[i].name,
self.tables[i].type_,
self.tables[i].row,
self.tables[i].column,
)
environment.addSymbol(self.tables[i].name, sym)
if self.aliases[i]:
environment.addSymbol(self.aliases[i], sym)
if i == 0:
tempDf = data
else:
tempDf = self.crossJoin([tempDf,data])
environment.dataFrame = tempDf
return
class TableID(Expression):
"""
    This class represents an abstract object for handling tables
"""
type_ = None
def __init__(self, name, row, column):
Expression.__init__(self, row, column)
self.name = name
def execute(self, environment):
result = jsonMode.extractTable(dbtemp, self.name)
if result == None:
return "FATAL ERROR TABLE ID"
        # Stores a list with the name and type of each column
lst = Struct.extractColumns(dbtemp, self.name)
columns = [l.name for l in lst]
newColumns = [self.name + "." + col for col in columns]
        df = pd.DataFrame(result, columns=newColumns)
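# Standalone illustration of the cross-join trick used by FromClause.crossJoin
# above: older pandas has no how="cross", so each frame gets a constant helper
# column, the frames are merged on it, and the helper is dropped. The data here
# is invented for the example.
def _cross_join_sketch():
    left = pd.DataFrame({"a": [1, 2]})
    right = pd.DataFrame({"b": ["x", "y", "z"]})
    left["____tempCol"] = 1
    right["____tempCol"] = 1
    out = pd.merge(left, right, on=["____tempCol"]).drop("____tempCol", axis=1)
    return out  # 2 x 3 = 6 rows: every combination of a and b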
from itertools import repeat, chain
import numpy as np
import pandas as pd
import pytest
from scipy import sparse
import scanpy as sc
def test_obs_df():
adata = sc.AnnData(
X=np.ones((2, 2)),
obs=pd.DataFrame({"obs1": [0, 1], "obs2": ["a", "b"]}, index=["cell1", "cell2"]),
var=pd.DataFrame({"gene_symbols": ["genesymbol1", "genesymbol2"]}, index=["gene1", "gene2"]),
obsm={"eye": np.eye(2), "sparse": sparse.csr_matrix(np.eye(2))},
layers={"double": np.ones((2, 2)) * 2}
)
adata.raw = sc.AnnData(
X=np.zeros((2, 2)),
var=pd.DataFrame({"gene_symbols": ["raw1", "raw2"]}, index=["gene1", "gene2"])
)
assert np.all(np.equal(
sc.get.obs_df(adata, keys=["gene2", "obs1"], obsm_keys=[("eye", 0), ("sparse", 1)]),
pd.DataFrame({"gene2": [1, 1], "obs1": [0, 1], "eye-0": [1, 0], "sparse-1": [0, 1]}, index=adata.obs_names)
))
assert np.all(np.equal(
sc.get.obs_df(adata, keys=["genesymbol2", "obs1"], obsm_keys=[("eye", 0), ("sparse", 1)], gene_symbols="gene_symbols"),
| pd.DataFrame({"genesymbol2": [1, 1], "obs1": [0, 1], "eye-0": [1, 0], "sparse-1": [0, 1]}, index=adata.obs_names) | pandas.DataFrame |
# --------------
# Importing Necessary libraries
import warnings
warnings.filterwarnings("ignore")
from matplotlib import pyplot as plt
plt.rcParams['figure.figsize'] = (10, 8)
import numpy as np
import pandas as pd
from sklearn.model_selection import GridSearchCV
from sklearn import preprocessing
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Imputer
# Load the train data stored in path variable
train = pd.read_csv(path)
# Load the test data stored in path1 variable
test = pd.read_csv(path1)
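# One plausible continuation, sketched for illustration only -- the real
# exercise steps are unknown, so the 'target' column name below is an assumed
# label present in both files, and the hyper-parameter grid is arbitrary.
def fit_baseline_tree(train_df, test_df, target='target'):
    X_train, y_train = train_df.drop(columns=[target]), train_df[target]
    X_test, y_test = test_df.drop(columns=[target]), test_df[target]
    # Encode object columns with a LabelEncoder fitted on the combined values
    for col in X_train.select_dtypes(include='object').columns:
        le = LabelEncoder()
        le.fit(pd.concat([X_train[col], X_test[col]]).astype(str))
        X_train[col] = le.transform(X_train[col].astype(str))
        X_test[col] = le.transform(X_test[col].astype(str))
    # Small grid search over tree depth, scored by accuracy on the held-out file
    grid = GridSearchCV(DecisionTreeClassifier(random_state=0),
                        {'max_depth': [3, 5, 8, None]}, cv=5)
    grid.fit(X_train, y_train)
    return accuracy_score(y_test, grid.predict(X_test))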
from __future__ import division #brings in Python 3.0 mixed type calculation rules
import datetime
import inspect
import numpy as np
import numpy.testing as npt
import os.path
import pandas as pd
import sys
from tabulate import tabulate
import unittest
##find parent directory and import model
#parentddir = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
#sys.path.append(parentddir)
from ..agdrift_exe import Agdrift
test = {}
class TestAgdrift(unittest.TestCase):
"""
    Agdrift unit tests.
"""
def setUp(self):
"""
setup the test as needed
e.g. pandas to open agdrift qaqc csv
Read qaqc csv and create pandas DataFrames for inputs and expected outputs
:return:
"""
pass
def tearDown(self):
"""
teardown called after each test
e.g. maybe write test results to some text file
:return:
"""
pass
def create_agdrift_object(self):
# create empty pandas dataframes to create empty object for testing
df_empty = pd.DataFrame()
# create an empty agdrift object
agdrift_empty = Agdrift(df_empty, df_empty)
return agdrift_empty
def test_validate_sim_scenarios(self):
"""
:description determines if user defined scenarios are valid for processing
:param application_method: type of Tier I application method employed
:param aquatic_body_def: type of endpoint of concern (e.g., pond, wetland); implies whether
        : endpoint of concern parameters (e.g., pond width) are set (i.e., by user or EPA standard)
:param drop_size_*: qualitative description of spray droplet size for aerial & ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of orchard being sprayed
:NOTE we perform an additional validation check related to distances later in the code just before integration
:return
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.out_sim_scenario_chk = pd.Series([], dtype='object')
expected_result = pd.Series([
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Aquatic Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Aquatic Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Valid Tier I Aquatic Airblast Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid Tier I Aquatic Aerial Scenario',
'Invalid Tier I Aquatic Ground Scenario',
'Invalid Tier I Aquatic Airblast Scenario',
'Invalid Tier I Terrestrial Aerial Scenario',
'Valid Tier I Terrestrial Ground Scenario',
'Valid Tier I Terrestrial Airblast Scenario',
'Invalid scenario ecosystem_type',
'Invalid Tier I Aquatic Assessment application method',
'Invalid Tier I Terrestrial Assessment application method'],dtype='object')
try:
#set test data
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.application_method = pd.Series(
['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'tier_1_ground',
'tier_1_airblast',
'tier_1_aerial',
'Tier II Aerial',
'Tier III Aerial'], dtype='object')
agdrift_empty.ecosystem_type = pd.Series(
['aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'terrestrial_assessment',
'Field Assessment',
'aquatic_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(
['epa_defined_pond',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'epa_defined_wetland',
'NaN',
'user_defined_pond',
'NaN',
'user_defined_wetland',
'NaN',
'Defined Pond',
'user_defined_pond',
'epa_defined_pond',
'NaN',
'NaN',
'NaN',
'epa_defined_pond',
'user_defined_wetland',
'user_defined_pond'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(
['NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'epa_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'user_defined_terrestrial',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'user_defined_terrestrial',
'user_defined_terrestrial',
'NaN',
'NaN',
'user_defined_terrestrial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(
['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'fine_to_medium',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'medium_to_coarse',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine Indeed',
'NaN',
'very_fine_to_medium',
'medium_to_coarse',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'NaN',
'fine_to_medium-coarse',
'very_fine',
'NaN',
'very_fine_to_medium',
'NaN',
'very_fine'], dtype='object')
agdrift_empty.boom_height = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'low',
'high',
'low',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'high',
'NaN',
'NaN',
'NaN',
'NaN'],dtype='object')
agdrift_empty.airblast_type = pd.Series(
['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'orchard',
'vineyard',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'vineyard',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.validate_sim_scenarios()
result = agdrift_empty.out_sim_scenario_chk
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_set_sim_scenario_id(self):
"""
:description provides scenario ids per simulation that match scenario names (i.e., column_names) from SQL database
:param out_sim_scenario_id: scenario name as assigned to individual simulations
:param num_simulations: number of simulations to assign scenario names
:param out_sim_scenario_chk: from previous method where scenarios were checked for validity
:param application_method: application method of scenario
:param drop_size_*: qualitative description of spray droplet size for aerial and ground applications
:param boom_height: qualitative height above ground of spray boom
:param airblast_type: type of airblast application (e.g., vineyard, orchard)
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result = pd.Series(['aerial_vf2f',
'aerial_f2m',
'aerial_m2c',
'aerial_c2vc',
'ground_low_vf',
'ground_low_fmc',
'ground_high_vf',
'ground_high_fmc',
'airblast_normal',
'airblast_dense',
'airblast_sparse',
'airblast_vineyard',
'airblast_orchard',
'Invalid'], dtype='object')
try:
agdrift_empty.num_simulations = len(expected_result)
agdrift_empty.out_sim_scenario_chk = pd.Series(['Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Aerial',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Ground',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Valid Tier I Airblast',
'Invalid Scenario'], dtype='object')
agdrift_empty.application_method = pd.Series(['tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_aerial',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_ground',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_airblast',
'tier_1_aerial'], dtype='object')
agdrift_empty.drop_size_aerial = pd.Series(['very_fine_to_fine',
'fine_to_medium',
'medium_to_coarse',
'coarse_to_very_coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.drop_size_ground = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'very_fine',
'fine_to_medium-coarse',
'very_fine',
'fine_to_medium-coarse',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.boom_height = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'low',
'low',
'high',
'high',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN'], dtype='object')
agdrift_empty.airblast_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'NaN',
'normal',
'dense',
'sparse',
'vineyard',
'orchard',
'NaN'], dtype='object')
agdrift_empty.set_sim_scenario_id()
result = agdrift_empty.out_sim_scenario_id
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_assign_column_names(self):
"""
        :description assigns column names (except distance column) from sql database to internal scenario names
        :param column_name: short name for pesticide application scenario for which distance vs deposition data is provided
        :param scenario_name: internal variable for holding scenario names
        :param scenario_number: index for scenario_name (this method assumes the distance values could occur in any column)
:param distance_name: internal name for the column holding distance data
:NOTE to test both outputs of this method I simply appended them together
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
agdrift_empty.scenario_name = pd.Series([], dtype='object')
expected_result = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard'], dtype='object')
try:
agdrift_empty.column_names = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc',
'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse',
'airblast_vineyard', 'airblast_orchard', 'distance_ft'])
#call method to assign scenario names
agdrift_empty.assign_column_names()
result = agdrift_empty.scenario_name
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_distances(self):
"""
:description retrieves distance values for deposition scenario datasets
: all scenarios use same distances
:param num_db_values: number of distance values to be retrieved
:param distance_name: name of column in sql database that contains the distance values
:NOTE any blank fields are filled with 'nan'
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
expected_result = pd.Series([], dtype='float')
try:
expected_result = [0.,0.102525,0.20505,0.4101,0.8202,1.6404,3.2808,4.9212,6.5616,9.8424,13.1232,19.6848,26.2464,
32.808,39.3696,45.9312,52.4928,59.0544,65.616,72.1776,78.7392,85.3008,91.8624,98.424,104.9856,
111.5472,118.1088,124.6704,131.232,137.7936,144.3552,150.9168,157.4784,164.04,170.6016,177.1632,
183.7248,190.2864,196.848,203.4096,209.9712,216.5328,223.0944,229.656,236.2176,242.7792,249.3408,
255.9024,262.464,269.0256,275.5872,282.1488,288.7104,295.272,301.8336,308.3952,314.9568,321.5184,
328.08,334.6416,341.2032,347.7648,354.3264,360.888,367.4496,374.0112,380.5728,387.1344,393.696,
400.2576,406.8192,413.3808,419.9424,426.504,433.0656,439.6272,446.1888,452.7504,459.312,465.8736,
472.4352,478.9968,485.5584,492.12,498.6816,505.2432,511.8048,518.3664,524.928,531.4896,538.0512,
544.6128,551.1744,557.736,564.2976,570.8592,577.4208,583.9824,590.544,597.1056,603.6672,610.2288,
616.7904,623.352,629.9136,636.4752,643.0368,649.5984,656.16,662.7216,669.2832,675.8448,682.4064,
688.968,695.5296,702.0912,708.6528,715.2144,721.776,728.3376,734.8992,741.4608,748.0224,754.584,
761.1456,767.7072,774.2688,780.8304,787.392,793.9536,800.5152,807.0768,813.6384,820.2,826.7616,
833.3232,839.8848,846.4464,853.008,859.5696,866.1312,872.6928,879.2544,885.816,892.3776,898.9392,
905.5008,912.0624,918.624,925.1856,931.7472,938.3088,944.8704,951.432,957.9936,964.5552,971.1168,
977.6784,984.24,990.8016,997.3632]
agdrift_empty.distance_name = 'distance_ft'
agdrift_empty.num_db_values = len(expected_result)
result = agdrift_empty.get_distances(agdrift_empty.num_db_values)
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_scenario_deposition_data(self):
"""
:description retrieves deposition data for all scenarios from sql database
: and checks that for each the first, last, and total number of values
: are correct
:param scenario: name of scenario for which data is to be retrieved
:param num_values: number of values included in scenario datasets
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
#scenario_data = pd.Series([[]], dtype='float')
result = pd.Series([], dtype='float')
#changing expected values to the 161st
expected_result = [0.50013,0.041273,161.0, #aerial_vf2f
0.49997,0.011741,161.0, #aerial_f2m
0.4999,0.0053241,161.0, #aerial_m2c
0.49988,0.0031189,161.0, #aerial_c2vc
1.019339,9.66E-04,161.0, #ground_low_vf
1.007885,6.13E-04,161.0, #ground_low_fmc
1.055205,1.41E-03,161.0, #ground_high_vf
1.012828,7.72E-04,161.0, #ground_high_fmc
8.91E-03,3.87E-05,161.0, #airblast_normal
0.1155276,4.66E-04,161.0, #airblast_dense
0.4762651,5.14E-05,161.0, #airblast_sparse
3.76E-02,3.10E-05,161.0, #airblast_vineyard
0.2223051,3.58E-04,161.0] #airblast_orchard
try:
agdrift_empty.num_db_values = 161 #set number of data values in sql db
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
#agdrift_empty.db_name = 'sqlite_agdrift_distance.db'
#this is the list of scenario names (column names) in sql db (the order here is important because
#the expected values are ordered in this manner
agdrift_empty.scenario_name = ['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
#cycle through reading scenarios and building result list
for i in range(len(agdrift_empty.scenario_name)):
#get scenario data
scenario_data = agdrift_empty.get_scenario_deposition_data(agdrift_empty.scenario_name[i],
agdrift_empty.num_db_values)
print(scenario_data)
#extract 1st and last values of scenario data and build result list (including how many values are
#retrieved for each scenario
if i == 0:
#fix this
result = [scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))]
else:
result.extend([scenario_data[0], scenario_data[agdrift_empty.num_db_values - 1],
float(len(scenario_data))])
npt.assert_allclose(result, expected_result, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_get_column_names(self):
"""
:description retrieves column names from sql database (sqlite_agdrift_distance.db)
: (each column name refers to a specific deposition scenario;
: the scenario name is used later to retrieve the deposition data)
:parameter output name of sql database table from which to retrieve requested data
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
location = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__)))
agdrift_empty.db_name = os.path.join(location, 'sqlite_agdrift_distance.db')
agdrift_empty.db_table = 'output'
result = pd.Series([], dtype='object')
expected_result = ['distance_ft','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard']
try:
result = agdrift_empty.get_column_names()
npt.assert_array_equal(result, expected_result, err_msg="", verbose=True)
finally:
tab = [result, expected_result]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_filter_arrays(self):
"""
:description eliminate blank data cells (i.e., distances for which no deposition value is provided)
(and thus reduce the number of x,y values to be used)
:parameter x_in: array of distance values associated with values for a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter y_in: array of deposition values associated with a deposition scenario (e.g., Aerial/EPA Defined Pond)
:parameter x_out: processed array of x_in values eliminating indices of blank distance/deposition values
:parameter y_out: processed array of y_in values eliminating indices of blank distance/deposition values
:NOTE y_in array is assumed to be populated by values >= 0. except for the blanks as 'nan' entries
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_result_x = pd.Series([0.,1.,4.,5.,6.,7.], dtype='float')
expected_result_y = pd.Series([10.,11.,14.,15.,16.,17.], dtype='float')
try:
x_in = pd.Series([0.,1.,2.,3.,4.,5.,6.,7.], dtype='float')
y_in = pd.Series([10.,11.,'nan','nan',14.,15.,16.,17.], dtype='float')
x_out, y_out = agdrift_empty.filter_arrays(x_in, y_in)
result_x = x_out
result_y = y_out
npt.assert_allclose(result_x, expected_result_x, rtol=1e-5, atol=0, err_msg='', verbose=True)
npt.assert_allclose(result_y, expected_result_y, rtol=1e-5, atol=0, err_msg='', verbose=True)
finally:
            tab = [result_x, expected_result_x, result_y, expected_result_y]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_list_sims_per_scenario(self):
"""
:description scan simulations and count number and indices of simulations that apply to each scenario
:parameter num_scenarios number of deposition scenarios included in SQL database
:parameter num_simulations number of simulations included in this model execution
:parameter scenario_name name of deposition scenario as recorded in SQL database
:parameter out_sim_scenario_id identification of deposition scenario specified per model run simulation
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_num_sims = pd.Series([2,2,2,2,2,2,2,2,2,2,2,2,2], dtype='int')
expected_sim_indices = pd.Series([[0,13,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[1,14,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[2,15,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[3,16,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[4,17,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[5,18,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[6,19,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[7,20,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[8,21,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[9,22,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[10,23,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[11,24,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
[12,25,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], dtype='int')
try:
agdrift_empty.scenario_name = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.out_sim_scenario_id = pd.Series(['aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard','aerial_vf2f', 'aerial_f2m', 'aerial_m2c', 'aerial_c2vc',
'ground_low_vf', 'ground_low_fmc', 'ground_high_vf', 'ground_high_fmc',
'airblast_normal', 'airblast_dense', 'airblast_sparse', 'airblast_vineyard',
'airblast_orchard'], dtype='object')
agdrift_empty.num_simulations = len(agdrift_empty.out_sim_scenario_id)
agdrift_empty.num_scenarios = len(agdrift_empty.scenario_name)
result_num_sims, result_sim_indices = agdrift_empty.list_sims_per_scenario()
npt.assert_array_equal(result_num_sims, expected_num_sims, err_msg='', verbose=True)
npt.assert_array_equal(result_sim_indices, expected_sim_indices, err_msg='', verbose=True)
finally:
tab = [result_num_sims, expected_num_sims, result_sim_indices, expected_sim_indices]
print("\n")
print(inspect.currentframe().f_code.co_name)
print(tabulate(tab, headers='keys', tablefmt='rst'))
return
def test_determine_area_dimensions(self):
"""
:description determine relevant area/length/depth of waterbody or terrestrial area
:param i: simulation number
:param ecosystem_type: type of assessment to be conducted
:param aquatic_body_type: source of dimensional data for area (EPA or User defined)
:param terrestrial_field_type: source of dimensional data for area (EPA or User defined)
:param *_width: default or user specified width of waterbody or terrestrial field
:param *_length: default or user specified length of waterbody or terrestrial field
:param *_depth: default or user specified depth of waterbody or terrestrial field
:NOTE all areas, i.e., ponds, wetlands, and terrestrial fields are of 1 hectare size; the user can elect
to specify a width other than the default width but it won't change the area size; thus for
        : user specified areas the length is calculated and not specified by the user
:return:
"""
# create empty pandas dataframes to create empty object for this unittest
agdrift_empty = self.create_agdrift_object()
expected_width = pd.Series([208.7, 208.7, 100., 400., 150., 0.], dtype='float')
expected_length = pd.Series([515.8, 515.8, 1076.39, 269.098, 717.593, 0.], dtype='float')
expected_depth = pd.Series([6.56, 0.4921, 7., 23., 0., 0.], dtype='float')
try:
agdrift_empty.ecosystem_type = pd.Series(['aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'aquatic_assessment',
'terrestrial_assessment',
'terrestrial_assessment'], dtype='object')
agdrift_empty.aquatic_body_type = pd.Series(['epa_defined_pond',
'epa_defined_wetland',
'user_defined_pond',
'user_defined_wetland',
'NaN',
'NaN'], dtype='object')
agdrift_empty.terrestrial_field_type = pd.Series(['NaN',
'NaN',
'NaN',
'NaN',
'user_defined_terrestrial',
'epa_defined_terrestrial'], dtype='object')
num_simulations = len(agdrift_empty.ecosystem_type)
agdrift_empty.default_width = 208.7
agdrift_empty.default_length = 515.8
agdrift_empty.default_pond_depth = 6.56
agdrift_empty.default_wetland_depth = 0.4921
agdrift_empty.user_pond_width = pd.Series(['NaN', 'NaN', 100., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_pond_depth = pd.Series(['NaN', 'NaN', 7., 'NaN', 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_width = pd.Series(['NaN', 'NaN', 'NaN', 400., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_wetland_depth = pd.Series(['NaN','NaN', 'NaN', 23., 'NaN', 'NaN'], dtype='float')
agdrift_empty.user_terrestrial_width = pd.Series(['NaN', 'NaN', 'NaN', 'NaN', 150., 'NaN'], dtype='float')
width_result = pd.Series(num_simulations * ['NaN'], dtype='float')
            length_result = pd.Series(num_simulations * ['NaN'], dtype='float')
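# Standalone aside (not part of the model under test): where the expected
# lengths in test_determine_area_dimensions come from. Every area is held at
# 1 hectare (~107,639 square feet), so a user-specified width only changes the
# computed length. The conversion constant is an assumption chosen to match the
# expected values above.
def one_hectare_length_ft(width_ft):
    sqft_per_hectare = 107639.1  # approx. 1 ha expressed in square feet
    return sqft_per_hectare / width_ft
# e.g. one_hectare_length_ft(100.) ~= 1076.39 and one_hectare_length_ft(400.) ~= 269.10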
#!/usr/bin/env python3
# coding: utf-8
"""Global sequencing data for the home page
Author: <NAME> - Vector Engineering Team (<EMAIL>)
"""
import argparse
import pandas as pd
import numpy as np
import json
from pathlib import Path
def main():
parser = argparse.ArgumentParser()
parser.add_argument(
"--case-data", type=str, required=True, help="Path to case data CSV file",
)
parser.add_argument(
"--location-map",
type=str,
required=True,
help="Path to location map JSON file",
)
parser.add_argument(
"-o", "--output", type=str, required=True, help="Path to output directory",
)
args = parser.parse_args()
out_path = Path(args.output)
# Load case counts by country
case_count_df = pd.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_confirmed_global.csv"
)
case_count_df.rename(columns={"Country/Region": "country"}, inplace=True)
# Upgrade some province/states to country/regions
upgrade_provinces = [
"Hong Kong",
"Macau",
"Faroe Islands",
"Greenland",
"French Guiana",
"French Polynesia",
"Guadeloupe",
"Martinique",
"Mayotte",
"New Caledonia",
"Reunion",
"Saint Barthelemy",
"Saint Pierre and Miquelon",
"St Martin",
"Aruba",
"Bonaire, Sint Eustatius and Saba",
"Curacao",
"Sint Maarten",
"Anguilla",
"Bermuda",
"British Virgin Islands",
"Cayman Islands",
"Falkland Islands (Malvinas)",
"Gibraltar",
"Isle of Man",
"Channel Islands",
"Montserrat",
"Turks and Caicos Islands",
"American Samoa",
"Guam",
"Northern Mariana Islands",
"Virgin Islands",
"Puerto Rico",
]
upgrade_province_inds = case_count_df["Province/State"].isin(upgrade_provinces)
case_count_df.loc[upgrade_province_inds, "country"] = case_count_df.loc[
upgrade_province_inds, "Province/State"
]
# Group by country/region
case_count_df = (
case_count_df.drop(columns=["Lat", "Long"])
.groupby("country")
.agg(np.sum)
.reset_index()
)
# Unpivot table
case_count_df = pd.melt(
case_count_df,
id_vars=["country"],
var_name="date",
value_name="cumulative_cases",
)
# Convert date strings to datetime objects
case_count_df["date"] = pd.to_datetime(case_count_df["date"])
case_count_df["month"] = case_count_df["date"].dt.to_period("M")
JHU_rename_map = {
"US": "USA",
"Congo (Kinshasa)": "DRC",
"Congo (Brazzaville)": "Republic of the Congo",
"Korea, South": "South Korea",
"Taiwan*": "Taiwan",
"Burma": "Myanmar",
# "Aruba": "Netherlands",
# "Bonaire, Sint Eustatius and Saba": "Netherlands",
# "Curacao": "Netherlands",
# "Sint Maarten": "Netherlands",
# "British Virgin Islands": "United Kingdom",
# "Channel Islands": "United Kingdom",
# "Cayman Islands": "United Kingdom",
# "Gibraltar": "United Kingdom",
# "Isle of Man": "United Kingdom",
# "Montserrat": "United Kingdom",
# "Turks and Caicos Islands": "United Kingdom",
# "Falkland Islands (Malvinas)": "United Kingdom",
# "Diamond Princess": "Japan",
# "Faroe Islands": "Denmark",
# "French Polynesia": "France",
# "Guadeloupe": "France",
# "Martinique": "France",
# "Mayotte": "France",
# "Reunion": "France",
# "New Caledonia": "France",
# "<NAME>": "France",
# "<NAME> and Miquelon": "France",
# "<NAME>": "France",
# "<NAME>": "Saint Martin",
# "MS Zaandam": "USA",
# "Marshall Islands": "USA",
# "Macau": "China",
}
def rename_countries(country):
if country in JHU_rename_map.keys():
return JHU_rename_map[country]
else:
return country
case_count_df["country"] = case_count_df["country"].apply(rename_countries)
case_count_df = (
case_count_df.groupby(["country", "month"])["cumulative_cases"]
.agg(np.max)
.reset_index()
)
case_count_df["month"] = case_count_df["month"].dt.start_time
case_count_df.to_json(str(out_path / "case_count.json"), orient="records")
case_df = pd.read_json(args.case_data).set_index("Accession ID")
case_df = case_df[["collection_date", "submission_date", "location_id"]]
location_map = pd.read_json(args.location_map)
case_df = case_df.join(location_map, on="location_id", how="left")
case_df["collection_date"] = pd.to_datetime(
case_df["collection_date"], errors="coerce"
)
case_df["submission_date"] = pd.to_datetime(
case_df["submission_date"], errors="coerce"
)
# Remove failed date parsing
case_df = case_df.loc[
(~pd.isnull(case_df["collection_date"]))
& (~pd.isnull(case_df["submission_date"]))
]
# Only take dates from 2019-12-15
case_df = case_df.loc[case_df["collection_date"] > pd.to_datetime("2019-12-15")]
# Calculate time deltas
case_df["turnaround_days"] = (
case_df["submission_date"] - case_df["collection_date"]
).dt.days
# Extract month
case_df["month"] = case_df["collection_date"].dt.to_period("M")
case_df["submission_month"] = case_df["submission_date"].dt.to_period("M")
# Remove invalid submission dates (negative turnaround times)
case_df = case_df.loc[case_df["turnaround_days"] >= 0]
# Upgrade provinces to countries
upgrade_inds = case_df["division"].isin(upgrade_provinces)
case_df.loc[upgrade_inds, "country"] = case_df.loc[upgrade_inds, "division"]
sequences_per_month = (
case_df.reset_index()
.groupby(["country", "month"])["Accession ID"]
.size()
.rename({"Palestine": "West Bank and Gaza"})
.rename("new_sequences")
.reset_index()
)
sequences_per_month["month"] = sequences_per_month["month"].dt.start_time
sequences_per_month.to_json(
str(out_path / "sequences_per_month.json"), orient="records"
)
turnaround_per_month = (
case_df.reset_index()
.groupby(["country", "submission_month"])["turnaround_days"]
.agg(
q5=lambda x: np.quantile(x, 0.05),
q25=lambda x: np.quantile(x, 0.25),
q50=lambda x: np.quantile(x, 0.50),
q75=lambda x: np.quantile(x, 0.75),
q95=lambda x: np.quantile(x, 0.95),
)
.reset_index()
)
turnaround_per_month["submission_month"] = turnaround_per_month[
"submission_month"
].dt.start_time
turnaround_per_month.to_json(
str(out_path / "turnaround_per_month.json"), orient="records"
)
# Load UID ISO FIPS lookup table
iso_lookup_df = pd.read_csv(
"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/UID_ISO_FIPS_LookUp_Table.csv"
)
# Upgrade provinces to country/regions
upgrade_inds = iso_lookup_df["Province_State"].isin(upgrade_provinces)
iso_lookup_df.rename(columns={"Country_Region": "country"}, inplace=True)
iso_lookup_df.loc[upgrade_inds, "country"] = iso_lookup_df.loc[
upgrade_inds, "Province_State"
]
# Only take countries, then set as the index
iso_lookup_df = (
iso_lookup_df.loc[
(upgrade_inds & pd.isnull(iso_lookup_df["Admin2"]))
            | (pd.isnull(iso_lookup_df["Province_State"]))
import datetime as dt
import unittest
import pandas as pd
import numpy as np
import numpy.testing as npt
import seaice.nasateam as nt
import seaice.tools.plotter.daily_extent as de
class Test_BoundingDateRange(unittest.TestCase):
def test_standard(self):
today = dt.date(2015, 9, 22)
month_bounds = (-3, 1)
expected_bounds = (dt.date(2015, 6, 1), dt.date(2015, 10, 31))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_year(self):
today = dt.date(2001, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2000, 12, 1), dt.date(2001, 2, 28))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
def test_bounding_dates_overlap_leap_year(self):
today = dt.date(2016, 1, 15)
month_bounds = (-1, 1)
expected_bounds = (dt.date(2015, 12, 1), dt.date(2016, 2, 29))
actual = de._bounding_date_range(today, *month_bounds)
self.assertEqual(expected_bounds, actual)
class Test_GetRecordYear(unittest.TestCase):
start_date = nt.BEGINNING_OF_SATELLITE_ERA
end_date = dt.date(2015, 12, 31)
date_index = pd.date_range(start_date, end_date)
base_series = pd.Series(index=date_index).fillna(5)
def _series(self, low=None, high=None, next_highest=None, next_lowest=None):
"""Return a series for easily testing record values. All the values are 5, with
different values set to the dates passed in as low, next_lowest, high,
and next_highest. The index of the returned series is from the beginning
of the satellite era to the end of 2015 (since that happens to be the
last complete year at the time of this writing).
"""
series = self.base_series.copy()
if high:
series[high] = 10
if next_highest:
series[next_highest] = 7
if next_lowest:
series[next_lowest] = 2
if low:
series[low] = 0
return series
def test_max(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002 , recordline:2002"""
series = self._series(high='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:9/2002(min) , recordline:2002"""
series = self._series(low='2002-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2002
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014, recordline:2010"""
series = self._series(high='2014-03-15', next_highest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_current_year_is_record(self):
"""Date: 4/2014, range: 1/2014 -> 5/2014, record:3/2014(min), recordline:2010"""
series = self._series(low='2014-03-15', next_lowest='2010-09-15')
date = pd.to_datetime('2014-04-15')
month_bounds = (-3, 1)
# expectation
expected = 2010
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_is_included_in_month_bounds(self):
"""Date: 2/2015, range: 10/2014 -> 3/2015, record: 1/2014, recordline: 2013-2014"""
series = self._series(low='2014-04-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-02-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_min_record_year_before_and_crossover_forward(self):
"""Date: 12/2015, range: 8/2015 -> 1/2016, record: 12/2014, recordline: 2014-2015"""
series = self._series(low='2014-09-20', next_lowest='1999-09-15')
date = pd.to_datetime('2015-12-15')
month_bounds = (-4, 1)
# expectation
expected = 2014
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004, recordline:2004"""
series = self._series(high='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_and_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:1/2004(min), recordline:2003-2004"""
series = self._series(low='2004-01-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2004
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(high='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_not_aligned(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2007 , recordline:2007-2008"""
series = self._series(low='2007-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2008
# execute
actual = de._get_record_year(series, date, month_bounds, 'min')
self.assertEqual(actual, expected)
def test_max_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(high='2009-11-27', next_highest='2004-11-27')
date = pd.to_datetime('2010-01-15')
month_bounds = (-3, 1)
# expectation
expected = 2005
# execute
actual = de._get_record_year(series, date, month_bounds, 'max')
self.assertEqual(actual, expected)
def test_min_year_changeover_record_is_plotted_with_current_year_plots_next_highest(self):
"""Date: 1/2010, range: 10/2009 -> 2/2010, record:11/2009 , recordline:2004-2005"""
series = self._series(low='2009-11-27', next_lowest='2004-11-27')
        date = pd.to_datetime('2010-01-15')
import numpy as np
import pandas as pd
from IPython import embed
from keras.models import load_model
from keras import backend as K
from qlknn.models.ffnn import determine_settings, _prescale, clip_to_bounds
def rmse(y_true, y_pred):
return K.sqrt(K.mean(K.square( y_true-y_pred )))
class KerasNDNN():
def __init__(self, model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=None, feature_max=None,
target_min=None, target_max=None,
target_names_mask=None,
):
self.model = model
self._feature_names = feature_names
self._target_names = target_names
self._feature_prescale_factor = feature_prescale_factor
self._feature_prescale_bias = feature_prescale_bias
self._target_prescale_factor = target_prescale_factor
self._target_prescale_bias = target_prescale_bias
if feature_min is None:
feature_min = pd.Series({var: -np.inf for var in self._feature_names})
self._feature_min = feature_min
if feature_max is None:
feature_max = pd.Series({var: np.inf for var in self._feature_names})
self._feature_max = feature_max
if target_min is None:
target_min = pd.Series({var: -np.inf for var in self._target_names})
self._target_min = target_min
if target_max is None:
target_max = pd.Series({var: np.inf for var in self._target_names})
self._target_max = target_max
self._target_names_mask = target_names_mask
def get_output(self, inp, clip_low=False, clip_high=False, low_bound=None, high_bound=None, safe=True, output_pandas=True, shift_output_by=0):
"""
This should accept a pandas dataframe, and should return a pandas dataframe
"""
nn_input, safe, clip_low, clip_high, low_bound, high_bound = \
determine_settings(self, inp, safe, clip_low, clip_high, low_bound, high_bound)
nn_input = _prescale(nn_input,
self._feature_prescale_factor.values,
self._feature_prescale_bias.values)
# Apply all NN layers an re-scale the outputs
branched_in = [nn_input.loc[:, self._branch1_names].values,
nn_input.loc[:, self._branch2_names].values]
nn_out = self.model.predict(branched_in) # Get prediction
output = (nn_out - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
output -= shift_output_by
output = clip_to_bounds(output, clip_low, clip_high, low_bound, high_bound)
if output_pandas:
output = pd.DataFrame(output, columns=self._target_names)
if self._target_names_mask is not None:
output.columns = self._target_names_mask
return output
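# Added illustration (not part of the original module): get_output() applies an
# affine pre-scaling (factor * x + bias) to the inputs and inverts the same map on
# the raw network outputs. A minimal round-trip sketch with made-up factors:
def _affine_scaling_roundtrip_example():
    import numpy as np
    factor, bias = np.array([2.0, 0.5]), np.array([-1.0, 3.0])
    x = np.array([[3.0, 8.0]])
    scaled = x * factor + bias            # forward scaling (cf. _prescale)
    restored = (scaled - bias) / factor   # inverse scaling (cf. get_output)
    return np.allclose(x, restored)       # True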
class Daniel7DNN(KerasNDNN):
_branch1_names = ['Ati', 'An', 'q', 'smag', 'x', 'Ti_Te']
_branch2_names = ['Ate']
def __init__(self, model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=None, feature_max=None,
target_min=None, target_max=None,
target_names_mask=None,
):
super().__init__(model, feature_names, target_names,
feature_prescale_factor, feature_prescale_bias,
target_prescale_factor, target_prescale_bias,
feature_min=feature_min, feature_max=feature_max,
target_min=target_min, target_max=target_max,
target_names_mask=target_names_mask,
)
self.shift = self.find_shift()
@classmethod
def from_files(cls, model_file, standardization_file):
model = load_model(model_file, custom_objects={'rmse': rmse})
stds = pd.read_csv(standardization_file)
feature_names = pd.Series(cls._branch1_names + cls._branch2_names)
target_names = pd.Series(['efeETG_GB'])
stds.set_index('name', inplace=True)
# Was normalised to s=1, m=0
s_t = 1
m_t = 0
s_sf = stds.loc[feature_names, 'std']
s_st = stds.loc[target_names, 'std']
m_sf = stds.loc[feature_names, 'mean']
m_st = stds.loc[target_names, 'mean']
feature_scale_factor = s_t / s_sf
feature_scale_bias = -m_sf * s_t / s_sf + m_t
target_scale_factor = s_t / s_st
target_scale_bias = -m_st * s_t / s_st + m_t
return cls(model, feature_names, target_names,
feature_scale_factor, feature_scale_bias,
target_scale_factor, target_scale_bias,
)
def get_output(self, inp, clip_low=False, clip_high=False, low_bound=None, high_bound=None, safe=True, output_pandas=True, shift_output=True):
if shift_output:
shift_output_by = self.shift
else:
shift_output_by = 0
output = super().get_output(inp, clip_low=clip_low, clip_high=clip_high, low_bound=low_bound, high_bound=high_bound, safe=safe, output_pandas=output_pandas, shift_output_by=shift_output_by)
return output
def find_shift(self):
# Define a point where the relu is probably 0
nn_input = pd.DataFrame({'Ati': 0, 'An': 0, 'q': 3, 'smag': 3.5, 'x': 0.69, 'Ti_Te': 1, 'Ate': -100}, index=[0])
branched_in = [nn_input.loc[:, self._branch1_names].values,
nn_input.loc[:, self._branch2_names].values]
# Get a function to evaluate the network up until the relu layer
try:
func = K.function(self.model.input, [self.model.get_layer('TR').output])
except ValueError:
raise Exception("'TR' layer not defined, shifting only relevant for new-style NNs")
relu_out = func(branched_in)
if relu_out[0][0, 0] != 0:
raise Exception('Relu is not zero at presumed stable point! Cannot find shift')
nn_out = self.model.predict(branched_in)
output = (nn_out - np.atleast_2d(self._target_prescale_bias)) / np.atleast_2d(self._target_prescale_factor)
shift = output[0][0]
return shift
if __name__ == '__main__':
# Test the function
nn = Daniel7DNN.from_files('../../../IPP-Neural-Networks/Saved-Networks/2018-11-25_Run0161a.h5', 'standardizations_training.csv')
shift = nn.find_shift()
scann = 200
input = pd.DataFrame()
#!/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import shutil
import csv
import zipfile
import tarfile
import configparser
import collections
import statistics
import pandas as pd
import matplotlib.pyplot as plt
import networkx as nx
from datetime import datetime
# Type of printing.
OK = 'ok' # [*]
NOTE = 'note' # [+]
FAIL = 'fail' # [-]
WARNING = 'warn' # [!]
NONE = 'none' # No label.
# Type of train data.
OS = 0
WEB = 1
FRAMEWORK = 2
CMS = 3
class Creator:
def __init__(self, utility):
# Read config.ini.
self.utility = utility
config = configparser.ConfigParser()
self.file_name = os.path.basename(__file__)
self.full_path = os.path.dirname(os.path.abspath(__file__))
self.root_path = os.path.join(self.full_path, '../')
config.read(os.path.join(self.root_path, 'config.ini'))
# Define master signature file path.
master_sig_dir = os.path.join(self.root_path, config['Common']['signature_path'])
self.master_prod_sig = os.path.join(master_sig_dir, config['VersionChecker']['signature_file'])
self.master_cont_sig = os.path.join(master_sig_dir, config['ContentExplorer']['signature_file'])
self.pd_prod_sig = pd.read_csv(self.master_prod_sig,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
self.pd_cont_sig = pd.read_csv(self.master_cont_sig,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
self.delete_prod_row_index = []
self.delete_cont_row_index = []
# Define master train data path.
self.train_categories = config['VersionCheckerML']['category'].split('@')
train_dir = os.path.join(self.full_path, config['VersionCheckerML']['train_path'])
self.train_os_in = os.path.join(train_dir, config['VersionCheckerML']['train_os_in'])
self.train_web_in = os.path.join(train_dir, config['VersionCheckerML']['train_web_in'])
self.train_framework_in = os.path.join(train_dir, config['VersionCheckerML']['train_framework_in'])
self.train_cms_in = os.path.join(train_dir, config['VersionCheckerML']['train_cms_in'])
for category in self.train_categories:
if category == 'OS':
self.pd_train_os = pd.read_csv(self.train_os_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'WEB':
self.pd_train_web = pd.read_csv(self.train_web_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'FRAMEWORK':
self.pd_train_fw = pd.read_csv(self.train_framework_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
elif category == 'CMS':
self.pd_train_cms = pd.read_csv(self.train_cms_in,
delimiter='@',
encoding='utf-8',
header=None,
quoting=csv.QUOTE_NONE)
else:
self.utility.print_message(FAIL, 'Choose category is not found.')
exit(1)
self.delete_train_os_row_index = []
self.delete_train_web_row_index = []
self.delete_train_fw_row_index = []
self.delete_train_cms_row_index = []
self.compress_dir = os.path.join(self.root_path, config['Creator']['compress_dir'])
self.signature_dir = os.path.join(self.root_path, config['Creator']['signature_dir'])
self.prohibit_ext_list = config['Creator']['prohibit_ext'].split('@')
self.save_file = config['Creator']['result_file'].replace('*', datetime.now().strftime('%Y%m%d%H%M%S'))
self.save_path = os.path.join(self.signature_dir, self.save_file)
self.header = str(config['Creator']['header']).split('@')
self.score_table_path = os.path.join(self.full_path, config['Exploit']['data_path'])
self.score_table = os.path.join(self.score_table_path, config['Creator']['score_table'])
self.threshold = float(config['Creator']['threshold'])
self.unknown_score = float(config['Creator']['unknown_score'])
self.turn_inside_num = int(config['Creator']['turn_inside_num'])
if self.turn_inside_num > 2:
self.turn_inside_num = 2
self.try_othello_num = int(config['Creator']['try_othello_num'])
self.del_open_root_dir = int(config['Creator']['del_open_root_dir'])
# Check necessary directories.
self.is_dir_existance(self.compress_dir)
self.is_dir_existance(self.signature_dir)
# Load score table.
self.pd_score_table = pd.read_csv(self.score_table)
# Check necessary directory.
def is_dir_existance(self, target_dir):
if os.path.exists(target_dir) is False:
os.mkdir(target_dir)
self.utility.print_message(WARNING, 'Directory is not found: {}.'.format(target_dir))
self.utility.print_message(WARNING, 'Maked directory: {}.'.format(target_dir))
# Count directory layer.
def count_dir_layer(self, target_dir):
# Count directory number.
split_symbol = '/'
if os.name == 'nt':
split_symbol = '\\'
tmp_dir_list = os.path.splitdrive(target_dir)[1].split(split_symbol)
tmp_dir_list.remove('')
return len(tmp_dir_list), tmp_dir_list
# Grep.
def execute_grep(self, target_product, target_dir):
base_index = 0
report = []
if os.path.exists(target_dir):
for root, _, files in os.walk(target_dir):
file_count = 0
ext_list = []
for file in files:
msg = 'Check file : {}/{}'.format(root.replace(target_dir, '').replace('\\', '/'), file)
self.utility.print_message(OK, msg)
_, ext = os.path.splitext(file)
if ext[1:] not in self.prohibit_ext_list:
# Count file number and target extension.
file_count += 1
ext_list.append(ext[1:])
# Save information each directory.
record = []
record.insert(0, base_index)
record.insert(1, target_product)
record.insert(2, root.replace(target_dir, ''))
record.insert(3, list(set(ext_list)))
record.insert(4, collections.Counter(ext_list))
record.insert(5, list(set(files)))
report.append(record)
base_index += 1
# Save extracted information.
pd.DataFrame(report).to_csv(self.save_path, mode='a', header=False, index=False)
else:
self.utility.print_message(FAIL, 'Path or file is not found.\n=> {}'.format(target_dir))
sys.exit(1)
return report
# Show graph.
def show_graph(self, target, graph):
self.utility.print_message(NOTE, 'Creating network image...')
plt.figure(figsize=(10, 10))
nx.draw_networkx(graph)
plt.axis('off')
file_name = os.path.join(self.full_path, target + '.png')
plt.savefig(file_name)
plt.show()
# Calculate score of node.
def calc_score(self, ext_type):
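# Added note: a single extension with a table probability of exactly 1.0 (or 0.0)
# decides the node outright; otherwise the node score is the median of the
# per-extension probabilities, with unknown extensions contributing self.unknown_score.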
score_list = []
for ext in ext_type:
# Get defined score from score table.
pd_score = self.pd_score_table[self.pd_score_table['extension'] == ext.lower()]
# Calculate score.
if len(pd_score) != 0:
if pd_score['probability'].values[0] == 1.0:
return 1.0
elif pd_score['probability'].values[0] == 0.0:
return 0.0
else:
score_list.append(pd_score['probability'].values[0])
else:
score_list.append(self.unknown_score)
return statistics.median(score_list)
# Return score of extension.
def return_score(self, files):
total_file_score = 0.0
for file in files:
_, ext = os.path.splitext(file)
pd_score = self.pd_score_table[self.pd_score_table['extension'] == ext[1:].lower()]
if len(pd_score) > 0:
total_file_score += pd_score['probability'].values[0]
return total_file_score
# Set node label.
def set_node_label(self, score):
label = 0.0
if score == 0.0:
label = 0.00
elif 0.1 <= score <= 0.3:
label = 0.25
elif 0.4 <= score <= 0.6:
label = 0.50
elif 0.7 < score <= 0.9:
label = 0.75
elif score == 1.0:
label = 1.00
return label
# Create Network using networkx.
def create_network(self, records):
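# Added note: each cumulative directory path becomes one node (deduplicated via
# dir_pool) carrying its extensions, file list, score and rank; edges point from a
# parent directory node to its child directory node.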
# Create direction graph.
graph = nx.DiGraph()
dir_pool = {}
node_index = 0
for index, record in enumerate(records):
self.utility.print_message(NOTE, '{}/{} Analyzing "{}"'.format(index + 1, len(records), record[2]))
_, dirs = self.count_dir_layer(record[2])
parent_dir = ''
label = '\\'
for layer_index, dir_name in enumerate(dirs):
label += str(dir_name) + '\\'
# Set parent node.
if label in dir_pool.keys():
parent_dir = label
else:
# Calculate score and classification.
score = 0.0
if len(record[3]) != 0:
score = self.calc_score(record[3])
rank = self.set_node_label(score)
# Add new node within attributes.
dir_pool[label] = node_index
graph.add_node(node_index,
path=record[2],
ext_type=record[3],
ext_count=record[4],
files=record[5],
score=score,
rank=rank)
node_index += 1
# Create edge that connecting two nodes.
if parent_dir != '' and label != parent_dir:
graph.add_edge(dir_pool[parent_dir], dir_pool[label])
msg = 'Create edge node.{} <-> node.{}'.format(dir_pool[parent_dir], dir_pool[label])
self.utility.print_message(OK, msg)
return graph
# Extract tar file.
def extract_tar(self, file, path):
with tarfile.open(file) as tf:
tf.extractall(path)
# Extract zip file.
def extract_zip(self, file, path):
with zipfile.ZipFile(file) as zf:
zf.extractall(os.path.join(path))
# Decompress compressed package file.
def decompress_file(self, package_path):
# Extract path and file name from target directory.
self.utility.print_message(NOTE, 'Starting decompress: {}.'.format(package_path))
# Create extraction directory name.
extract_dir_name = ''
if '.tar' in os.path.splitext(package_path)[0]:
extract_dir_name = os.path.splitext(package_path)[0].replace('.tar', '')
else:
extract_dir_name = os.path.splitext(package_path)[0]
try:
# Execute extraction.
if '.tar' in package_path:
self.utility.print_message(OK, 'Decompress... : {}'.format(package_path))
self.extract_tar(package_path, extract_dir_name)
elif '.zip' in package_path:
self.utility.print_message(OK, 'Decompress... : {}'.format(package_path))
self.extract_zip(package_path, extract_dir_name)
except Exception as e:
self.utility.print_exception(e, '{}'.format(e.args))
return extract_dir_name
# Explore open path.
def explore_open_path(self, graph, all_paths):
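# Added note: each root-to-leaf path is walked backwards; nodes whose rank meets
# self.threshold are collected as "open", and a closed node whose parent and child
# are both open is flipped to rank 1.0 (the "Othello" rule). The path prefix up to
# the last closed ancestor is then stripped from the collected open paths.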
open_paths = []
for idx, path in enumerate(all_paths):
tmp_open_paths = []
close_path_index = len(path) - 1
self.utility.print_message(NOTE, '{}/{} Explore path: {}'.format(idx + 1, len(all_paths), path))
for idx2, node_index in enumerate(path[::-1]):
msg = 'Checking turn inside node.{}:{}'.format(node_index, graph.nodes[node_index]['path'])
self.utility.print_message(OK, msg)
# Add open path.
rank = graph.nodes[node_index]['rank']
if graph.nodes[node_index]['rank'] >= self.threshold:
self.utility.print_message(OK, 'Add node {} to open path list.'.format(node_index))
tmp_open_paths.append([node_index, graph.nodes[node_index]['path'], rank])
# Set close path index.
close_path_index = len(path) - idx2 - 2
# Execute "Othello".
elif 0 < (len(path) - idx2 - 1) < len(path) - 1:
# Extract ranks of parent and child node.
parent_node_rank = graph.nodes[path[len(path) - idx2 - 2]]['rank']
child_node_rank = graph.nodes[path[len(path) - idx2]]['rank']
# Checking turn inside the node rank.
if parent_node_rank >= self.threshold and child_node_rank >= self.threshold:
msg = 'Turned inside rank={} -> 1.0.'.format(graph.nodes[node_index]['rank'])
self.utility.print_message(WARNING, msg)
self.utility.print_message(WARNING, 'Add node {} to open path list.'.format(node_index))
tmp_open_paths.append([node_index, graph.nodes[node_index]['path'], 1.0])
graph.nodes[node_index]['rank'] = 1.0
# Set close path index.
close_path_index = len(path) - idx2 - 2
else:
if close_path_index < len(path) - idx2 - 1:
# Set close path index.
close_path_index = len(path) - idx2 - 1
# Do not execute "Othello".
else:
if close_path_index < len(path) - idx2 - 1:
# Set close path index.
close_path_index = len(path) - idx2 - 1
# Cut unnecessary path (root path -> open path).
if close_path_index != -1:
for tmp_path in tmp_open_paths:
delete_seq = len(graph.nodes[path[close_path_index]]['path'])
open_paths.append([tmp_path[0], tmp_path[1][delete_seq:], tmp_path[2]])
else:
open_paths.extend(tmp_open_paths)
return list(map(list, set(map(tuple, open_paths))))
# Add train data.
def add_train_data(self, category, vendor, prod_name, prod_ver, files, target_path):
category_list = []
vendor_list = []
prod_name_list = []
version_list = []
path_list = []
# Add train data info to temporally buffer.
if '@' not in target_path:
category_list.append(category)
vendor_list.append(vendor)
prod_name_list.append(prod_name)
version_list.append(prod_ver)
path_list.append('(' + target_path + ')')
# Add file path signature info to temporally buffer.
for file in files:
target_file = '(' + target_path + file + ')'
if '@' in target_file:
continue
category_list.append(category)
vendor_list.append(vendor)
prod_name_list.append(prod_name)
version_list.append(prod_ver)
path_list.append(target_file)
else:
self.utility.print_message(WARNING, 'This path includes the special character "@": {}'.format(target_path))
return category_list, vendor_list, prod_name_list, version_list, path_list
# Push path signature to temporally buffer.
def push_path_sig(self, sig_path, category, vendor, prod, version, target_path):
if '@' not in target_path:
sig_path[0].append(category)
sig_path[1].append(vendor)
sig_path[2].append(prod)
sig_path[3].append(version)
sig_path[4].append(target_path)
sig_path[5].append('*')
sig_path[6].append('*')
sig_path[7].append('0')
else:
self.utility.print_message(WARNING, 'This path includes the special character "@": {}'.format(target_path))
# Push file signature to temporally buffer.
def push_file_sig(self, sig_file, category, vendor, prod, version, target_file):
if '@' not in target_file:
sig_file[0].append(category)
sig_file[1].append(vendor)
sig_file[2].append(prod)
sig_file[3].append(version)
sig_file[4].append(target_file)
else:
self.utility.print_message(WARNING, 'This path includes the special character "@": {}'.format(target_file))
# Push train data to temporally buffer.
def push_train_data(self, train, category, categories, vendors, prods, versions, targets):
train[category][0].extend(categories)
train[category][1].extend(vendors)
train[category][2].extend(prods)
train[category][3].extend(versions)
train[category][4].extend(targets)
# Check existing signature of same product.
def is_existing_same_product(self, category, vendor, prod_name, version):
ret = True
# Check file signature.
df_extract_sig = self.pd_prod_sig[(self.pd_prod_sig[1] == vendor) &
(self.pd_prod_sig[2] == prod_name) &
(self.pd_prod_sig[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same product signature: {}/{}/{}'.format(vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
# Check path signature.
df_extract_sig = self.pd_cont_sig[(self.pd_cont_sig[1] == vendor) &
(self.pd_cont_sig[2] == prod_name) &
(self.pd_cont_sig[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same path signature: {}/{}/{}'.format(vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
# Check train data.
if category == 'OS':
df_extract_sig = self.pd_train_os[(self.pd_train_os[1] == vendor) &
(self.pd_train_os[2] == prod_name) &
(self.pd_train_os[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'WEB':
df_extract_sig = self.pd_train_web[(self.pd_train_web[1] == vendor) &
(self.pd_train_web[2] == prod_name) &
(self.pd_train_web[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'FRAMEWORK':
df_extract_sig = self.pd_train_fw[(self.pd_train_fw[1] == vendor) &
(self.pd_train_fw[2] == prod_name) &
(self.pd_train_fw[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
elif category == 'CMS':
df_extract_sig = self.pd_train_cms[(self.pd_train_cms[1] == vendor) &
(self.pd_train_cms[2] == prod_name) &
(self.pd_train_cms[3] == version)]
if len(df_extract_sig) != 0:
msg = 'Existing same {} train data: {}/{}/{}'.format(category, vendor, prod_name, version)
self.utility.print_message(FAIL, msg)
ret = False
return ret
# Main control.
def extract_file_structure(self, category, vendor, package):
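# Added note: overall pipeline - decompress the package, grep its file structure,
# build the directory graph, enumerate root-to-leaf paths, run the "Othello"
# open-path search, then write path/file signatures and ML train data.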
# Check package path.
package_path = os.path.join(self.compress_dir, package)
if os.path.exists(package_path) is False:
self.utility.print_message(FAIL, 'Package is not found: {}.'.format(package_path))
return
# Extract product name and version.
# ex) Package name must be like "wordpress@4.9.8@.tar.gz" (fields separated by "@").
package_info = package.split('@')
prod_name = ''
prod_ver = ''
if len(package_info) < 2:
prod_name = package_info[0]
prod_ver = 'unknown'
else:
prod_name = package_info[0]
prod_ver = package_info[1]
# Check existing same product signature.
if self.is_existing_same_product(category, vendor, prod_name, prod_ver) is False:
return
# Decompress compressed package file.
extract_path = self.decompress_file(package_path)
# Create unique root directory.
root_dir = os.path.join(self.compress_dir, prod_name + '_' + prod_ver)
if os.path.exists(root_dir):
shutil.rmtree(root_dir)
os.mkdir(root_dir)
shutil.move(extract_path, root_dir)
# Create report header.
pd.DataFrame([], columns=self.header).to_csv(self.save_path, mode='w', index=False)
# Extract file structures.
try:
# Extract file path each products.
target_name = prod_name + ' ' + prod_ver
self.utility.print_message(NOTE, 'Extract package {}'.format(root_dir))
record = self.execute_grep(target_name, root_dir)
graph = self.create_network(record)
# Extract all paths to end node from root node.
all_paths = []
node_num = len(graph._adj)
for end_node_idx in range(node_num):
msg = '{}/{} Analyzing node={}'.format(end_node_idx + 1, node_num, end_node_idx)
self.utility.print_message(OK, msg)
if len(graph._adj[end_node_idx]) == 0:
for path in nx.all_simple_paths(graph, source=0, target=end_node_idx):
msg = 'Extract path that source={} <-> target={}, path={}'.format(0, end_node_idx, path)
self.utility.print_message(OK, msg)
all_paths.append(path)
# Execute "Othello".
open_paths = []
for try_num in range(self.try_othello_num):
self.utility.print_message(OK, '{}/{} Execute "Othello".'.format(try_num + 1, self.try_othello_num))
open_paths.extend(self.explore_open_path(graph, all_paths))
# Create signature.
open_paths = list(map(list, set(map(tuple, open_paths))))
# Initialize temporally buffer.
sig_file = []
for _ in range(len(self.pd_prod_sig.columns)):
sig_file.append([])
sig_path = []
for _ in range(len(self.pd_cont_sig.columns)):
sig_path.append([])
train = []
for _ in range(len(self.train_categories)):
temp = []
for _ in range(len(self.pd_train_os.columns)):
temp.append([])
train.append(temp)
for idx, item in enumerate(open_paths):
# Create signature.
files = graph.nodes[item[0]]['files']
if item[2] == 1.0 and len(files) > 0:
# Create target path.
target_path = item[1].replace('\\', '/')
if target_path.endswith('/') is False:
target_path += '/'
# Add signature to master signature file.
if self.return_score(files) / len(files) == 1.0:
# Add path signature info to temporally buffer.
self.push_path_sig(sig_path, category, vendor, prod_name, prod_ver, target_path)
self.utility.print_message(OK, '{}/{} Add path signature: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add file path signature info to temporally buffer.
for file in files:
target_file = '(' + target_path + file + ')'
self.push_file_sig(sig_file, category, vendor, prod_name, prod_ver, target_file)
self.utility.print_message(OK, '{}/{} Add file signature: {}.'.format(idx + 1,
len(open_paths),
target_file))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
self.push_path_sig(sig_path, category, vendor, prod_name, prod_ver, extra_target_path)
self.utility.print_message(OK, '{}/{} Add path signature: {}.'
.format(idx + 1, len(open_paths), extra_target_path))
# Add extra file path signature info to temporally buffer.
for file in files:
extra_target_file = '(' + extra_target_path + file + ')'
self.push_file_sig(sig_file, category, vendor, prod_name,
prod_ver, extra_target_file)
self.utility.print_message(OK, '{}/{} Add file signature: {}.'
.format(idx + 1, len(open_paths), extra_target_file))
else:
# Add train data info to temporally buffer.
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
extra_target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Create train data.
elif item[2] >= self.threshold:
target_path = item[1].replace('\\', '/')
if target_path.endswith('/') is False:
target_path += '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Add extra path signature to master signature file.
tmp_target_path = target_path.split('/')
tmp_target_path = [s for s in tmp_target_path if s]
if len(tmp_target_path) > 1:
for path_idx in range(self.del_open_root_dir):
extra_target_path = '/'.join(tmp_target_path[path_idx + 1:])
extra_target_path = '/' + extra_target_path + '/'
categories, vendors, prods, versions, targets = self.add_train_data(category,
vendor,
prod_name,
prod_ver,
files,
extra_target_path)
if len(categories) == 0:
continue
if category == 'OS':
self.push_train_data(train, OS, categories, vendors, prods, versions, targets)
elif category == 'WEB':
self.push_train_data(train, WEB, categories, vendors, prods, versions, targets)
elif category == 'FRAMEWORK':
self.push_train_data(train, FRAMEWORK, categories, vendors, prods, versions, targets)
elif category == 'CMS':
self.push_train_data(train, CMS, categories, vendors, prods, versions, targets)
self.utility.print_message(OK, '{}/{} Add train data: {}.'.format(idx + 1,
len(open_paths),
target_path))
# Write path signature to master signature file.
if len(sig_path[0]) != 0:
series_category = pd.Series(sig_path[0])
series_vendor = pd.Series(sig_path[1])
series_prod = pd.Series(sig_path[2])
series_version = pd.Series(sig_path[3])
series_signature = pd.Series(sig_path[4])
series_dummy1 = pd.Series(sig_path[5])
series_dummy2 = pd.Series(sig_path[6])
series_dummy3 = pd.Series(sig_path[7])
temp_df = pd.DataFrame({0: series_category,
1: series_vendor,
2: series_prod,
3: series_version,
4: series_signature,
5: series_dummy1,
6: series_dummy2,
7: series_dummy3}, columns=None)
origin_num = len(self.pd_cont_sig)
self.pd_cont_sig = pd.concat([self.pd_cont_sig, temp_df])
self.pd_cont_sig = self.pd_cont_sig.drop_duplicates(subset=4, keep=False)
add_signature_num = len(self.pd_cont_sig) - origin_num
self.pd_cont_sig.sort_values(by=[0, 1, 2, 3, 4]).to_csv(self.master_cont_sig,
sep='@',
encoding='utf-8',
header=False,
index=False,
quoting=csv.QUOTE_NONE)
self.utility.print_message(NOTE, 'Add Path signature: {} items.'.format(add_signature_num))
# Write file signature to master signature file.
if len(sig_file[0]) != 0:
series_category = pd.Series(sig_file[0])
series_vendor = pd.Series(sig_file[1])
series_prod = pd.Series(sig_file[2])
series_version = pd.Series(sig_file[3])
series_signature = pd.Series(sig_file[4])
temp_df = pd.DataFrame({0: series_category,
1: series_vendor,
2: series_prod,
3: series_version,
4: series_signature}, columns=None)
origin_num = len(self.pd_prod_sig)
self.pd_prod_sig = pd.concat([self.pd_prod_sig, temp_df])
self.pd_prod_sig = self.pd_prod_sig.drop_duplicates(subset=4, keep=False)
add_signature_num = len(self.pd_prod_sig) - origin_num
self.pd_prod_sig.sort_values(by=[0, 1, 2, 3, 4]).to_csv(self.master_prod_sig,
sep='@',
encoding='utf-8',
header=False,
index=False,
quoting=csv.QUOTE_NONE)
self.utility.print_message(NOTE, 'Add File signature: {} items.'.format(add_signature_num))
# Write OS train data to master train data.
if len(train[OS][0]) != 0:
series_category = pd.Series(train[OS][0])
series_vendor = pd.Series(train[OS][1])
series_prod = pd.Series(train[OS][2])
series_version = pd.Series(train[OS][3])
# Copyright (c) 2018-2022, NVIDIA CORPORATION.
import numpy as np
import pandas as pd
import pytest
from pandas.api import types as ptypes
import cudf
from cudf.api import types as types
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, False),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, False),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), False),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), False),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), False),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, True),
(pd.CategoricalDtype, True),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), False),
(pd.Series(dtype="float"), False),
(pd.Series(dtype="complex"), False),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), True),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, True),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, False),
(cudf.Decimal64Dtype, False),
(cudf.Decimal32Dtype, False),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), True),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), False),
(cudf.Decimal64Dtype(5, 2), False),
(cudf.Decimal32Dtype(5, 2), False),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), False),
(cudf.Series(dtype="int"), False),
(cudf.Series(dtype="float"), False),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), True),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), False),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), False),
# TODO: Currently creating an empty Series of list type ignores the
# provided type and instead makes a float64 Series.
(cudf.Series([[1, 2], [3, 4, 5]]), False),
# TODO: Currently creating an empty Series of struct type fails because
# it uses a numpy utility that doesn't understand StructDtype.
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_categorical_dtype(obj, expect):
assert types.is_categorical_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, True),
(int, True),
(float, True),
(complex, True),
(str, False),
(object, False),
# NumPy types.
(np.bool_, True),
(np.int_, True),
(np.float64, True),
(np.complex128, True),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), True),
(np.int_(), True),
(np.float64(), True),
(np.complex128(), True),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), True),
(np.dtype("int"), True),
(np.dtype("float"), True),
(np.dtype("complex"), True),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), True),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), True),
(np.array([], dtype=np.complex128), True),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), True),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), True),
(pd.Series(dtype="complex"), True),
(pd.Series(dtype="str"), False),
(pd.Series(dtype="unicode"), False),
(pd.Series(dtype="datetime64[s]"), False),
(pd.Series(dtype="timedelta64[s]"), False),
(pd.Series(dtype="category"), False),
(pd.Series(dtype="object"), False),
# cuDF dtypes.
(cudf.CategoricalDtype, False),
(cudf.ListDtype, False),
(cudf.StructDtype, False),
(cudf.Decimal128Dtype, True),
(cudf.Decimal64Dtype, True),
(cudf.Decimal32Dtype, True),
(cudf.IntervalDtype, False),
# cuDF dtype instances.
(cudf.CategoricalDtype("a"), False),
(cudf.ListDtype(int), False),
(cudf.StructDtype({"a": int}), False),
(cudf.Decimal128Dtype(5, 2), True),
(cudf.Decimal64Dtype(5, 2), True),
(cudf.Decimal32Dtype(5, 2), True),
(cudf.IntervalDtype(int), False),
# cuDF objects
(cudf.Series(dtype="bool"), True),
(cudf.Series(dtype="int"), True),
(cudf.Series(dtype="float"), True),
(cudf.Series(dtype="str"), False),
(cudf.Series(dtype="datetime64[s]"), False),
(cudf.Series(dtype="timedelta64[s]"), False),
(cudf.Series(dtype="category"), False),
(cudf.Series(dtype=cudf.Decimal128Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal64Dtype(5, 2)), True),
(cudf.Series(dtype=cudf.Decimal32Dtype(5, 2)), True),
(cudf.Series([[1, 2], [3, 4, 5]]), False),
(cudf.Series([{"a": 1, "b": 2}, {"c": 3}]), False),
(cudf.Series(dtype=cudf.IntervalDtype(int)), False),
),
)
def test_is_numeric_dtype(obj, expect):
assert types.is_numeric_dtype(obj) == expect
@pytest.mark.parametrize(
"obj, expect",
(
# Base Python objects.
(bool(), False),
(int(), False),
(float(), False),
(complex(), False),
(str(), False),
("", False),
(r"", False),
(object(), False),
# Base Python types.
(bool, False),
(int, True),
(float, False),
(complex, False),
(str, False),
(object, False),
# NumPy types.
(np.bool_, False),
(np.int_, True),
(np.float64, False),
(np.complex128, False),
(np.str_, False),
(np.unicode_, False),
(np.datetime64, False),
(np.timedelta64, False),
# NumPy scalars.
(np.bool_(), False),
(np.int_(), True),
(np.float64(), False),
(np.complex128(), False),
(np.str_(), False),
(np.unicode_(), False),
(np.datetime64(), False),
(np.timedelta64(), False),
# NumPy dtype objects.
(np.dtype("bool"), False),
(np.dtype("int"), True),
(np.dtype("float"), False),
(np.dtype("complex"), False),
(np.dtype("str"), False),
(np.dtype("unicode"), False),
(np.dtype("datetime64"), False),
(np.dtype("timedelta64"), False),
(np.dtype("object"), False),
# NumPy arrays.
(np.array([], dtype=np.bool_), False),
(np.array([], dtype=np.int_), True),
(np.array([], dtype=np.float64), False),
(np.array([], dtype=np.complex128), False),
(np.array([], dtype=np.str_), False),
(np.array([], dtype=np.unicode_), False),
(np.array([], dtype=np.datetime64), False),
(np.array([], dtype=np.timedelta64), False),
(np.array([], dtype=object), False),
# Pandas dtypes.
(pd.core.dtypes.dtypes.CategoricalDtypeType, False),
(pd.CategoricalDtype, False),
# Pandas objects.
(pd.Series(dtype="bool"), False),
(pd.Series(dtype="int"), True),
(pd.Series(dtype="float"), False),
( | pd.Series(dtype="complex") | pandas.Series |
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import StratifiedKFold
import warnings
warnings.filterwarnings('ignore')
train_df = pd.read_csv('../input/train.csv')
test_df = pd.read_csv('../input/test.csv')
features = [c for c in train_df.columns if c not in ['ID_code', 'target']]
target = train_df['target']
param = {
'bagging_freq': 5, 'bagging_fraction': 0.331, 'boost_from_average':'false',
'boost': 'gbdt', 'feature_fraction': 0.0405, 'learning_rate': 0.0083,
'max_depth': -1, 'metric':'auc', 'min_data_in_leaf': 80,
'min_sum_hessian_in_leaf': 10.0,'num_leaves': 13, 'num_threads': 8,
'tree_learner': 'serial', 'objective': 'binary', 'verbosity': 1
}
folds = StratifiedKFold(n_splits=15, shuffle=False, random_state=2319)
oof = np.zeros(len(train_df))
predictions = np.zeros(len(test_df))
for fold_, (trn_idx, val_idx) in enumerate(folds.split(train_df.values, target.values)):
print("Fold {}".format(fold_))
trn_data = lgb.Dataset(train_df.iloc[trn_idx][features], label=target.iloc[trn_idx])
val_data = lgb.Dataset(train_df.iloc[val_idx][features], label=target.iloc[val_idx])
clf = lgb.train(param, trn_data, 1000000, valid_sets = [trn_data, val_data], verbose_eval=5000, early_stopping_rounds = 4000)
oof[val_idx] = clf.predict(train_df.iloc[val_idx][features], num_iteration=clf.best_iteration)
predictions += clf.predict(test_df[features], num_iteration=clf.best_iteration) / folds.n_splits
print("CV score: {:<8.5f}".format(roc_auc_score(target, oof)))
sub = pd.DataFrame({"ID_code": test_df.ID_code.values})
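# Hypothetical continuation (added; not in the original snippet): attach the
# fold-averaged predictions and write them out. The output filename is an assumption.
sub["target"] = predictions
sub.to_csv("submission.csv", index=False)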
# -*- coding: utf-8 -*-
from ..utils import get_drift, get_offset, verify_series
def rsi(close, length=None, drift=None, offset=None, **kwargs):
"""Indicator: Relative Strength Index (RSI)"""
# Validate arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 14
drift = get_drift(drift)
offset = get_offset(offset)
# Calculate Result
negative = close.diff(drift)
positive = negative.copy()
positive[positive < 0] = 0  # Make negatives 0 for the positive series
negative[negative > 0] = 0  # Make positives 0 for the negative series
positive_avg = positive.ewm(com=length, adjust=False).mean()
negative_avg = negative.ewm(com=length, adjust=False).mean().abs()
rsi = 100 * positive_avg / (positive_avg + negative_avg)
# Offset
if offset != 0:
rsi = rsi.shift(offset)
# Handle fills
if 'fillna' in kwargs:
rsi.fillna(kwargs['fillna'], inplace=True)
if 'fill_method' in kwargs:
rsi.fillna(method=kwargs['fill_method'], inplace=True)
# Name and Categorize it
rsi.name = f"RSI_{length}"
rsi.category = 'momentum'
return rsi
rsi.__doc__ = \
"""Relative Strength Index (RSI)
The Relative Strength Index is popular momentum oscillator used to measure the
velocity as well as the magnitude of directional price movements.
Sources:
https://www.tradingview.com/wiki/Relative_Strength_Index_(RSI)
Calculation:
Default Inputs:
length=14, drift=1
ABS = Absolute Value
EMA = Exponential Moving Average
positive = close.diff(drift) if close.diff(drift) > 0 else 0
negative = close.diff(drift) if close.diff(drift) < 0 else 0
pos_avg = EMA(positive, length)
neg_avg = ABS(EMA(negative, length))
RSI = 100 * pos_avg / (pos_avg + neg_avg)
Args:
close (pd.Series): Series of 'close's
length (int): Its period. Default: 14
drift (int): The difference period. Default: 1
offset (int): How many periods to offset the result. Default: 0
Kwargs:
fillna (value, optional): pd.DataFrame.fillna(value)
fill_method (value, optional): Type of fill method
Returns:
pd.Series: New feature generated.
"""
from pandas import DataFrame
from ..overlap.sma import sma
def stoch_rsi(close, length=None, smoothK=None, smoothD=None, drift=None, offset=None, **kwargs):
"""Indicator: Stochastic RSI (Stoch RSI)"""
# Validate arguments
close = verify_series(close)
length = int(length) if length and length > 0 else 14
smoothK = int(smoothK) if smoothK and smoothK > 0 else 3
smoothD = int(smoothD) if smoothD and smoothD > 0 else 3
# Calculate RSI
data_rsi = rsi(close, length=length, drift=drift, offset=offset, **kwargs)
# Calculate Result
min = data_rsi.rolling(length).min()
max = data_rsi.rolling(length).max()
stoch_rsi = 100 * (data_rsi - min) / (max - min)
stoch_rsi_k = sma(stoch_rsi, smoothK)
stoch_rsi_d = sma(stoch_rsi_k, smoothD)
stoch_rsi_k.name = f"STOCH_RSI_K_{smoothK}_{smoothD}_{length}"
stoch_rsi_d.name = f"STOCH_RSI_D_{smoothK}_{smoothD}_{length}"
stoch_rsi_k.category = stoch_rsi_d.category = 'momentum'
# Prepare DataFrame to return
data = {stoch_rsi_k.name: stoch_rsi_k, stoch_rsi_d.name: stoch_rsi_d}
stoch_rsif = DataFrame(data)